from langchain.chains import LLMSummarizationCheckerChain
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=2)
text = """
Your 9-year-old might like these recent discoveries made by The James Webb Space Telescope (JWST):
• In 2023, The JWST spotted a number of galaxies nicknamed "green peas." They were given this name because they are small, round, and green, like peas.
• The telescope captured images of galaxies that are over 13 billion years old. This means that the light from these galaxies has been traveling for over 13 billion years to reach us.
• JWST took the very first pictures of a planet outside of our own solar system. These distant worlds are called "exoplanets." Exo means "from outside."
These discoveries can spark a child's imagination about the infinite wonders of the universe."""
checker_chain.run(text)
from langchain.chains import LLMSummarizationCheckerChain
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=3)
text = "The Greenland Sea is an outlying portion of the Arctic Ocean located between Iceland, Norway, the Svalbard archipelago and Greenland. It has an area of 465,000 square miles and is one of five oceans in the world, alongside the Pacific Ocean, Atlantic Ocean, Indian Ocean, and the Southern Ocean. It is the smallest of the five oceans and is covered almost entirely by water, some of which is frozen in the form of glaciers and icebergs. The sea is named after the island of Greenland, and is the Arctic Ocean's main outlet to the Atlantic. It is often frozen over so navigation is limited, and is considered the northern branch of the Norwegian Sea."
checker_chain.run(text)
from langchain.chains import LLMSummarizationCheckerChain
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3')
from langchain_community.document_loaders import S3DirectoryLoader
loader = S3DirectoryLoader("testing-hwc")
loader.load()
loader = S3DirectoryLoader("testing-hwc", prefix="fake")
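# Hedged next step, mirroring the load() call above: fetch only the prefix-filtered
# documents (assumes the same "testing-hwc" bucket is reachable).
loader.load()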
get_ipython().run_line_magic('pip', 'install --upgrade --quiet singlestoredb')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import SingleStoreDB
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
os.environ["SINGLESTOREDB_URL"] = "root:pass@localhost:3306/db"
docsearch = SingleStoreDB.from_documents(
docs,
embeddings,
table_name="notebook", # use table with a custom name
)
query = "What did the president say about Ketanji Brown Jackson"
docs = docsearch.similarity_search(query) # Find documents that correspond to the query
print(docs[0].page_content)
get_ipython().run_line_magic('pip', 'install -U langchain openai singlestoredb langchain-experimental # (newest versions required for multi-modal)')
import os
from langchain_community.vectorstores import SingleStoreDB
from langchain_experimental.open_clip import OpenCLIPEmbeddings
os.environ["SINGLESTOREDB_URL"] = "root:pass@localhost:3306/db"
TEST_IMAGES_DIR = "../../modules/images"
docsearch = SingleStoreDB(OpenCLIPEmbeddings())
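# Hedged sketch of what likely follows: index the test images with the CLIP embedder.
# `add_images` is assumed to be available on this vector store; verify it against your
# installed langchain-community version.
image_uris = sorted(
    os.path.join(TEST_IMAGES_DIR, name)
    for name in os.listdir(TEST_IMAGES_DIR)
    if name.endswith(".jpg")
)
docsearch.add_images(uris=image_uris)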
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.model_laboratory import ModelLaboratory
from langchain.prompts import PromptTemplate
from langchain_community.llms import Cohere, HuggingFaceHub
from langchain_openai import OpenAI
import getpass
import os
os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API Key:")
os.environ["OPENAI_API_KEY"] = getpass.getpass("Open API Key:")
os.environ["HUGGINGFACEHUB_API_TOKEN"] = getpass.getpass("Hugging Face API Key:")
llms = [
OpenAI(temperature=0),
Cohere(temperature=0),
HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 1}),
]
model_lab = ModelLaboratory.from_llms(llms)
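# A natural follow-up (the same call appears verbatim later in this document):
model_lab.compare("What color is a flamingo?")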
get_ipython().run_line_magic('pip', 'install --upgrade --quiet aim')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-search-results')
import os
from datetime import datetime
from langchain.callbacks import AimCallbackHandler, StdOutCallbackHandler
from langchain_openai import OpenAI
os.environ["OPENAI_API_KEY"] = "..."
os.environ["SERPAPI_API_KEY"] = "..."
session_group = datetime.now().strftime("%m.%d.%Y_%H.%M.%S")
aim_callback = AimCallbackHandler(
repo=".",
experiment_name="scenario 1: OpenAI LLM",
)
callbacks = [StdOutCallbackHandler(), aim_callback]
llm = OpenAI(temperature=0, callbacks=callbacks)
llm_result = llm.generate(["Tell me a joke", "Tell me a poem"] * 3)
aim_callback.flush_tracker(
langchain_asset=llm,
experiment_name="scenario 2: Chain with multiple SubChains on multiple generations",
)
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
Title: {title}
Playwright: This is a synopsis for the above play:"""
prompt_template = PromptTemplate(input_variables=["title"], template=template)
synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callbacks=callbacks)
test_prompts = [
{
"title": "documentary about good video games that push the boundary of game design"
},
{"title": "the phenomenon behind the remarkable speed of cheetahs"},
{"title": "the best in class mlops tooling"},
]
synopsis_chain.apply(test_prompts)
aim_callback.flush_tracker(
langchain_asset=synopsis_chain, experiment_name="scenario 3: Agent with Tools"
)
from langchain.agents import AgentType, initialize_agent, load_tools
tools = load_tools(["serpapi", "llm-math"], llm=llm, callbacks=callbacks)
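# Hedged continuation for "scenario 3": build and run the agent, then flush the Aim
# tracker. The question string is illustrative, not from the source.
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    callbacks=callbacks,
)
agent.run("Who won the most recent Formula One championship?")  # hypothetical query
aim_callback.flush_tracker(langchain_asset=agent, reset=False, finish=True)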
import os
from langchain.chains import ConversationalRetrievalChain
from langchain_community.vectorstores import Vectara
from langchain_openai import OpenAI
from langchain_community.document_loaders import TextLoader
loader = TextLoader("state_of_the_union.txt")
documents = loader.load()
vectara = Vectara.from_documents(documents, embedding=None)
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
openai_api_key = os.environ["OPENAI_API_KEY"]
llm = OpenAI(openai_api_key=openai_api_key, temperature=0)
retriever = vectara.as_retriever()
d = retriever.get_relevant_documents(
"What did the president say about Ketanji Brown Jackson", k=2
)
print(d)
bot = ConversationalRetrievalChain.from_llm(
llm, retriever, memory=memory, verbose=False
)
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query})
result["answer"]
query = "Did he mention who she suceeded"
result = bot.invoke({"question": query})
result["answer"]
bot = ConversationalRetrievalChain.from_llm(
OpenAI(temperature=0), vectara.as_retriever()
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["answer"]
chat_history = [(query, result["answer"])]
query = "Did he mention who she suceeded"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["answer"]
bot = ConversationalRetrievalChain.from_llm(
llm, vectara.as_retriever(), return_source_documents=True
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["source_documents"][0]
from langchain.chains import LLMChain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from langchain.chains.question_answering import load_qa_chain
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
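# Hedged sketch: the imports above suggest assembling the chain manually from a
# question generator plus a QA chain over the Vectara retriever.
doc_chain = load_qa_chain(llm, chain_type="map_reduce")
chain = ConversationalRetrievalChain(
    retriever=vectara.as_retriever(),
    question_generator=question_generator,
    combine_docs_chain=doc_chain,
)
chat_history = []
result = chain.invoke({"question": query, "chat_history": chat_history})
result["answer"]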
get_ipython().run_line_magic('pip', 'install --upgrade --quiet scann')
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import ScaNN
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = HuggingFaceEmbeddings()
db = ScaNN.from_documents(docs, embeddings)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
docs[0]
from langchain.chains import RetrievalQA
from langchain_community.chat_models import google_palm
palm_client = google_palm.ChatGooglePalm(google_api_key="YOUR_GOOGLE_PALM_API_KEY")
from langchain_community.document_loaders import RoamLoader
loader = RoamLoader("Roam_DB")
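# Hedged next step, following the loader pattern used throughout this document:
docs = loader.load()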
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.prompts import PromptTemplate
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0).configurable_fields(
temperature=ConfigurableField(
id="llm_temperature",
name="LLM Temperature",
description="The temperature of the LLM",
)
)
model.invoke("pick a random number")
model.with_config(configurable={"llm_temperature": 0.9}).invoke("pick a random number")
prompt = PromptTemplate.from_template("Pick a random number above {x}")
chain = prompt | model
chain.invoke({"x": 0})
chain.with_config(configurable={"llm_temperature": 0.9}).invoke({"x": 0})
from langchain.runnables.hub import HubRunnable
prompt = HubRunnable("rlm/rag-prompt")
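# Hedged continuation, mirroring the configurable_fields pattern shown above; the
# "hub_commit" id and the alternate prompt name are illustrative.
prompt = HubRunnable("rlm/rag-prompt").configurable_fields(
    owner_repo_commit=ConfigurableField(
        id="hub_commit",
        name="Hub Commit",
        description="The Hub commit to pull from",
    )
)
prompt.invoke({"question": "foo", "context": "bar"})
prompt.with_config(configurable={"hub_commit": "rlm/rag-prompt-llama"}).invoke(
    {"question": "foo", "context": "bar"}
)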
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-cloud-sql-pg langchain-google-vertexai')
from google.colab import auth
auth.authenticate_user()
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
get_ipython().system('gcloud services enable sqladmin.googleapis.com')
REGION = "us-central1" # @param {type: "string"}
INSTANCE = "my-pg-instance" # @param {type: "string"}
DATABASE = "my-database" # @param {type: "string"}
TABLE_NAME = "vector_store" # @param {type: "string"}
from langchain_google_cloud_sql_pg import PostgreSQLEngine
engine = await PostgreSQLEngine.afrom_instance(
project_id=PROJECT_ID, region=REGION, instance=INSTANCE, database=DATABASE
)
from langchain_google_cloud_sql_pg import PostgreSQLEngine
await engine.ainit_vectorstore_table(
table_name=TABLE_NAME,
vector_size=768, # Vector size for VertexAI model(textembedding-gecko@latest)
)
get_ipython().system('gcloud services enable aiplatform.googleapis.com')
from langchain_google_vertexai import VertexAIEmbeddings
embedding = VertexAIEmbeddings(
model_name="textembedding-gecko@latest", project=PROJECT_ID
)
from langchain_google_cloud_sql_pg import PostgresVectorStore
store = await PostgresVectorStore.create( # Use .create() to initialize an async vector store
engine=engine,
table_name=TABLE_NAME,
embedding_service=embedding,
)
import uuid
all_texts = ["Apples and oranges", "Cars and airplanes", "Pineapple", "Train", "Banana"]
metadatas = [{"len": len(t)} for t in all_texts]
ids = [str(uuid.uuid4()) for _ in all_texts]
await store.aadd_texts(all_texts, metadatas=metadatas, ids=ids)
await store.adelete([ids[1]])
query = "I'd like a fruit."
docs = await store.asimilarity_search(query)
print(docs)
query_vector = embedding.embed_query(query)
docs = await store.asimilarity_search_by_vector(query_vector, k=2)
print(docs)
from langchain_google_cloud_sql_pg.indexes import IVFFlatIndex
index = IVFFlatIndex()
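# Hedged next step (assumes the async index-management coroutine on the store, in
# keeping with the async API used above):
await store.aapply_vector_index(index)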
get_ipython().run_line_magic('pip', 'install --upgrade --quiet modal')
get_ipython().system('modal token new')
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import Modal
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
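# Hedged sketch: point the Modal LLM at a deployed web endpoint and run the chain.
# The endpoint URL is a placeholder you must replace with your own deployment.
endpoint_url = "https://ecorp--custom-llm-endpoint.modal.run"  # hypothetical URL
llm = Modal(endpoint_url=endpoint_url)
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
llm_chain.run(question)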
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-search-results')
import os
from langchain_community.tools.google_scholar import GoogleScholarQueryRun
from langchain_community.utilities.google_scholar import GoogleScholarAPIWrapper
os.environ["SERP_API_KEY"] = ""
tool = GoogleScholarQueryRun(api_wrapper=GoogleScholarAPIWrapper())
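# Hedged usage: run the Google Scholar tool on a sample query (query text is illustrative).
tool.run("LLM Models")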
model_url = "http://localhost:5000"
from langchain.chains import LLMChain
from langchain.globals import set_debug
from langchain.prompts import PromptTemplate
from langchain_community.llms import TextGen
set_debug(True)
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
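# Hedged continuation: connect the TextGen LLM to the local server and run the chain.
llm = TextGen(model_url=model_url)
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
llm_chain.run(question)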
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
documents = TextLoader("../../state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
retriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever()
docs = retriever.get_relevant_documents(
"What did the president say about Ketanji Brown Jackson"
)
pretty_print_docs(docs)
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
compressor = LLMChainExtractor.from_llm(llm)
compression_retriever = ContextualCompressionRetriever(
base_compressor=compressor, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
"What did the president say about Ketanji Jackson Brown"
)
pretty_print_docs(compressed_docs)
from langchain.retrievers.document_compressors import LLMChainFilter
_filter = LLMChainFilter.from_llm(llm)
compression_retriever = ContextualCompressionRetriever(
base_compressor=_filter, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
"What did the president say about Ketanji Jackson Brown"
)
pretty_print_docs(compressed_docs)
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
embeddings_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)
compression_retriever = ContextualCompressionRetriever(
base_compressor=embeddings_filter, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
"What did the president say about Ketanji Jackson Brown"
)
pretty_print_docs(compressed_docs)
from langchain.retrievers.document_compressors import DocumentCompressorPipeline
from langchain_community.document_transformers import EmbeddingsRedundantFilter
from langchain_text_splitters import CharacterTextSplitter
splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=". ")
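# Hedged sketch: chain the splitter with redundancy and relevance filters into a
# single compressor pipeline, reusing the `embeddings` and `retriever` defined above.
redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings)
relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)
pipeline_compressor = DocumentCompressorPipeline(
    transformers=[splitter, redundant_filter, relevant_filter]
)
compression_retriever = ContextualCompressionRetriever(
    base_compressor=pipeline_compressor, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
    "What did the president say about Ketanji Jackson Brown"
)
pretty_print_docs(compressed_docs)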
import os
os.environ["EXA_API_KEY"] = "..."
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-exa')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_exa import ExaSearchRetriever, TextContentsOptions
from langchain_openai import ChatOpenAI
retriever = ExaSearchRetriever(
k=5, text_contents_options=TextContentsOptions(max_length=200)
)
prompt = PromptTemplate.from_template(
"""Answer the following query based on the following context:
query: {query}
<context>
{context}
</context"""
)
llm = ChatOpenAI()
chain = (
RunnableParallel({"context": retriever, "query": RunnablePassthrough()})
| prompt
| llm
)
chain.invoke("When is the best time to visit japan?")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-exa')
from exa_py import Exa
from langchain.agents import tool
exa = Exa(api_key=os.environ["EXA_API_KEY"])
@tool
def search(query: str):
"""Search for a webpage based on the query."""
return exa.search(f"{query}", use_autoprompt=True, num_results=5)
@tool
def find_similar(url: str):
"""Search for webpages similar to a given URL.
The url passed in should be a URL returned from `search`.
"""
return exa.find_similar(url, num_results=5)
@tool
def get_contents(ids: list[str]):
"""Get the contents of a webpage.
The ids passed in should be a list of ids returned from `search`.
"""
return exa.get_contents(ids)
tools = [search, get_contents, find_similar]
from langchain.agents import AgentExecutor, OpenAIFunctionsAgent
from langchain_core.messages import SystemMessage
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0)
system_message = SystemMessage(
    content="You are a web researcher who answers user questions by looking up information on the internet and retrieving contents of helpful documents. Cite your sources."
)
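# Hedged continuation: wrap the Exa tools in an OpenAI-functions agent and run it.
# The research question is illustrative.
agent_prompt = OpenAIFunctionsAgent.create_prompt(system_message)
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=agent_prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.run("Summarize for me a fascinating article about cats.")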
from typing import List
from langchain.prompts.chat import (
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.schema import (
AIMessage,
BaseMessage,
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
class CAMELAgent:
def __init__(
self,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.system_message = system_message
self.model = model
self.init_messages()
def reset(self) -> None:
self.init_messages()
return self.stored_messages
def init_messages(self) -> None:
self.stored_messages = [self.system_message]
def update_messages(self, message: BaseMessage) -> List[BaseMessage]:
self.stored_messages.append(message)
return self.stored_messages
def step(
self,
input_message: HumanMessage,
) -> AIMessage:
messages = self.update_messages(input_message)
output_message = self.model(messages)
self.update_messages(output_message)
return output_message
import os
os.environ["OPENAI_API_KEY"] = ""
assistant_role_name = "Python Programmer"
user_role_name = "Stock Trader"
task = "Develop a trading bot for the stock market"
word_limit = 50 # word limit for task brainstorming
task_specifier_sys_msg = SystemMessage(content="You can make a task more specific.")
task_specifier_prompt = """Here is a task that {assistant_role_name} will help {user_role_name} to complete: {task}.
Please make it more specific. Be creative and imaginative.
Please reply with the specified task in {word_limit} words or less. Do not add anything else."""
task_specifier_template = HumanMessagePromptTemplate.from_template(
template=task_specifier_prompt
)
task_specify_agent = CAMELAgent(task_specifier_sys_msg, ChatOpenAI(temperature=1.0))
task_specifier_msg = task_specifier_template.format_messages(
assistant_role_name=assistant_role_name,
user_role_name=user_role_name,
task=task,
word_limit=word_limit,
)[0]
specified_task_msg = task_specify_agent.step(task_specifier_msg)
print(f"Specified task: {specified_task_msg.content}")
specified_task = specified_task_msg.content
assistant_inception_prompt = """Never forget you are a {assistant_role_name} and I am a {user_role_name}. Never flip roles! Never instruct me!
We share a common interest in collaborating to successfully complete a task.
You must help me to complete the task.
Here is the task: {task}. Never forget our task!
I must instruct you based on your expertise and my needs to complete the task.
I must give you one instruction at a time.
You must write a specific solution that appropriately completes the requested instruction.
You must decline my instruction honestly if you cannot perform the instruction due to physical, moral, legal reasons or your capability and explain the reasons.
Do not add anything else other than your solution to my instruction.
You are never supposed to ask me any questions; you only answer questions.
You are never supposed to reply with a flake solution. Explain your solutions.
Your solution must be declarative sentences and simple present tense.
Unless I say the task is completed, you should always start with:
Solution: <YOUR_SOLUTION>
<YOUR_SOLUTION> should be specific and provide preferable implementations and examples for task-solving.
Always end <YOUR_SOLUTION> with: Next request."""
user_inception_prompt = """Never forget you are a {user_role_name} and I am a {assistant_role_name}. Never flip roles! You will always instruct me.
We share a common interest in collaborating to successfully complete a task.
I must help you to complete the task.
Here is the task: {task}. Never forget our task!
You must instruct me based on my expertise and your needs to complete the task ONLY in the following two ways:
1. Instruct with a necessary input:
Instruction: <YOUR_INSTRUCTION>
Input: <YOUR_INPUT>
2. Instruct without any input:
Instruction: <YOUR_INSTRUCTION>
Input: None
The "Instruction" describes a task or question. The paired "Input" provides further context or information for the requested "Instruction".
You must give me one instruction at a time.
I must write a response that appropriately completes the requested instruction.
I must decline your instruction honestly if I cannot perform the instruction due to physical, moral, legal reasons or my capability and explain the reasons.
You should instruct me, not ask me questions.
Now you must start to instruct me using the two ways described above.
Do not add anything else other than your instruction and the optional corresponding input!
Keep giving me instructions and necessary inputs until you think the task is completed.
When the task is completed, you must only reply with a single word <CAMEL_TASK_DONE>.
Never say <CAMEL_TASK_DONE> unless my responses have solved your task."""
def get_sys_msgs(assistant_role_name: str, user_role_name: str, task: str):
assistant_sys_template = SystemMessagePromptTemplate.from_template(
template=assistant_inception_prompt
)
assistant_sys_msg = assistant_sys_template.format_messages(
assistant_role_name=assistant_role_name,
user_role_name=user_role_name,
task=task,
)[0]
user_sys_template = SystemMessagePromptTemplate.from_template(
template=user_inception_prompt
)
user_sys_msg = user_sys_template.format_messages(
assistant_role_name=assistant_role_name,
user_role_name=user_role_name,
task=task,
)[0]
return assistant_sys_msg, user_sys_msg
assistant_sys_msg, user_sys_msg = get_sys_msgs(
assistant_role_name, user_role_name, specified_task
)
assistant_agent = CAMELAgent(assistant_sys_msg, ChatOpenAI(temperature=0.2))
user_agent = CAMELAgent(user_sys_msg, ChatOpenAI(temperature=0.2))
assistant_agent.reset()
user_agent.reset()
user_msg = HumanMessage(
content=(
f"{user_sys_msg.content}. "
"Now start to give me introductions one by one. "
"Only reply with Instruction and Input."
)
)
assistant_msg = HumanMessage(content=f"{assistant_sys_msg.content}")
assistant_msg = assistant_agent.step(user_msg)
print(f"Original task prompt:\n{task}\n")
print(f"Specified task prompt:\n{specified_task}\n")
chat_turn_limit, n = 30, 0
while n < chat_turn_limit:
n += 1
user_ai_msg = user_agent.step(assistant_msg)
user_msg = HumanMessage(content=user_ai_msg.content)
print(f"AI User ({user_role_name}):\n\n{user_msg.content}\n\n")
assistant_ai_msg = assistant_agent.step(user_msg)
assistant_msg = HumanMessage(content=assistant_ai_msg.content)
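    # Hedged loop tail: echo the assistant's reply and stop once the user agent
    # signals completion, per the inception prompt's <CAMEL_TASK_DONE> convention.
    print(f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg.content}\n\n")
    if "<CAMEL_TASK_DONE>" in user_msg.content:
        break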
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pymilvus')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Milvus
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
vector_db = Milvus.from_documents(
docs,
embeddings,
connection_args={"host": "127.0.0.1", "port": "19530"},
)
query = "What did the president say about Ketanji Brown Jackson"
docs = vector_db.similarity_search(query)
docs[0].page_content
vector_db = Milvus.from_documents(
docs,
embeddings,
collection_name="collection_1",
connection_args={"host": "127.0.0.1", "port": "19530"},
)
vector_db = Milvus(
embeddings,
connection_args={"host": "127.0.0.1", "port": "19530"},
collection_name="collection_1",
)
from langchain_core.documents import Document
docs = [
Document(page_content="i worked at kensho", metadata={"namespace": "harrison"}),
Document(page_content="i worked at facebook", metadata={"namespace": "ankush"}),
]
vectorstore = Milvus.from_documents(
docs,
embeddings,
connection_args={"host": "127.0.0.1", "port": "19530"},
drop_old=True,
partition_key_field="namespace", # Use the "namespace" field as the partition key
)
vectorstore.as_retriever(
search_kwargs={"expr": 'namespace == "ankush"'}
).get_relevant_documents("where did i work?")
vectorstore.as_retriever(
search_kwargs={"expr": 'namespace == "harrison"'}
).get_relevant_documents("where did i work?")
from langchain.docstore.document import Document
docs = [
| Document(page_content="foo", metadata={"id": 1}) | langchain.docstore.document.Document |
from langchain.prompts import FewShotPromptTemplate, PromptTemplate
from langchain.prompts.example_selector import (
MaxMarginalRelevanceExampleSelector,
SemanticSimilarityExampleSelector,
)
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
example_prompt = PromptTemplate(
input_variables=["input", "output"],
template="Input: {input}\nOutput: {output}",
)
examples = [
{"input": "happy", "output": "sad"},
{"input": "tall", "output": "short"},
{"input": "energetic", "output": "lethargic"},
{"input": "sunny", "output": "gloomy"},
{"input": "windy", "output": "calm"},
]
example_selector = MaxMarginalRelevanceExampleSelector.from_examples(
examples,
OpenAIEmbeddings(),
FAISS,
k=2,
)
mmr_prompt = FewShotPromptTemplate(
example_selector=example_selector,
example_prompt=example_prompt,
prefix="Give the antonym of every input",
suffix="Input: {adjective}\nOutput:",
input_variables=["adjective"],
)
print(mmr_prompt.format(adjective="worried"))
example_selector = SemanticSimilarityExampleSelector.from_examples(
    examples,
    OpenAIEmbeddings(),
    FAISS,
    k=2,
)
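# Hedged counterpart to mmr_prompt above: the same few-shot template driven by plain
# semantic similarity instead of max marginal relevance.
similar_prompt = FewShotPromptTemplate(
    example_selector=example_selector,
    example_prompt=example_prompt,
    prefix="Give the antonym of every input",
    suffix="Input: {adjective}\nOutput:",
    input_variables=["adjective"],
)
print(similar_prompt.format(adjective="worried"))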
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
template = """You are a chatbot having a conversation with a human.
{chat_history}
Human: {human_input}
Chatbot:"""
prompt = PromptTemplate(
input_variables=["chat_history", "human_input"], template=template
)
memory = ConversationBufferMemory(memory_key="chat_history")
llm = OpenAI()
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
verbose=True,
memory=memory,
)
llm_chain.predict(human_input="Hi there my friend")
llm_chain.predict(human_input="Not too bad - how are you?")
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
)
from langchain_core.messages import SystemMessage
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_messages(
[
SystemMessage(
content="You are a chatbot having a conversation with a human."
), # The persistent system prompt
MessagesPlaceholder(
variable_name="chat_history"
), # Where the memory will be stored.
HumanMessagePromptTemplate.from_template(
"{human_input}"
), # Where the human input will be injected
]
)
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
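# Hedged continuation, mirroring the LLMChain-with-memory setup earlier in this
# document, but with the chat prompt and a chat model:
llm = ChatOpenAI()
chat_llm_chain = LLMChain(
    llm=llm,
    prompt=prompt,
    verbose=True,
    memory=memory,
)
chat_llm_chain.predict(human_input="Hi there my friend")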
from langchain.agents import AgentType, initialize_agent
from langchain.requests import Requests
from langchain_community.agent_toolkits import NLAToolkit
from langchain_openai import OpenAI
llm = OpenAI(
temperature=0, max_tokens=700, model_name="gpt-3.5-turbo-instruct"
) # You can swap between different core LLM's here.
speak_toolkit = NLAToolkit.from_llm_and_url(llm, "https://api.speak.com/openapi.yaml")
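# Hedged continuation: hand the toolkit's natural-language tools to a zero-shot agent.
# The sample request is illustrative.
natural_language_tools = speak_toolkit.get_tools()
mrkl = initialize_agent(
    natural_language_tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)
mrkl.run("I have an end of year party for my Italian class and have to buy some Italian clothes for it")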
SOURCE = "test" # @param {type:"Query"|"CollectionGroup"|"DocumentReference"|"string"}
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-datastore')
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
from google.colab import auth
auth.authenticate_user()
get_ipython().system('gcloud services enable datastore.googleapis.com')
from langchain_core.documents import Document
from langchain_google_datastore import DatastoreSaver
data = [Document(page_content="Hello, World!")]
saver = DatastoreSaver()
saver.upsert_documents(data)
saver = DatastoreSaver("Collection")
saver.upsert_documents(data)
doc_ids = ["AnotherCollection/doc_id", "foo/bar"]
saver = DatastoreSaver()
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lxml')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet html2text')
from langchain_community.document_loaders import EverNoteLoader
loader = EverNoteLoader("example_data/testing.enex")
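# Hedged next step, following the loader pattern used throughout this document:
loader.load()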
from langchain_community.vectorstores import AnalyticDB
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
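# Hedged sketch: build the AnalyticDB connection string from environment variables
# (the defaults shown are placeholders) and index the documents.
import os

connection_string = AnalyticDB.connection_string_from_db_params(
    driver=os.environ.get("PG_DRIVER", "psycopg2cffi"),
    host=os.environ.get("PG_HOST", "localhost"),
    port=int(os.environ.get("PG_PORT", "5432")),
    database=os.environ.get("PG_DATABASE", "postgres"),
    user=os.environ.get("PG_USER", "postgres"),
    password=os.environ.get("PG_PASSWORD", "postgres"),
)
vector_db = AnalyticDB.from_documents(docs, embeddings, connection_string=connection_string)
query = "What did the president say about Ketanji Brown Jackson"
docs = vector_db.similarity_search(query)
print(docs[0].page_content)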
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.model_laboratory import ModelLaboratory
from langchain.prompts import PromptTemplate
from langchain_community.llms import Cohere, HuggingFaceHub
from langchain_openai import OpenAI
import getpass
import os
os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API Key:")
os.environ["OPENAI_API_KEY"] = getpass.getpass("Open API Key:")
os.environ["HUGGINGFACEHUB_API_TOKEN"] = getpass.getpass("Hugging Face API Key:")
llms = [
OpenAI(temperature=0),
Cohere(temperature=0),
HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 1}),
]
model_lab = ModelLaboratory.from_llms(llms)
model_lab.compare("What color is a flamingo?")
prompt = PromptTemplate(
template="What is the capital of {state}?", input_variables=["state"]
)
model_lab_with_prompt = ModelLaboratory.from_llms(llms, prompt=prompt)
model_lab_with_prompt.compare("New York")
from langchain.chains import SelfAskWithSearchChain
from langchain_community.utilities import SerpAPIWrapper
open_ai_llm = OpenAI(temperature=0)
search = SerpAPIWrapper()
self_ask_with_search_openai = SelfAskWithSearchChain(
llm=open_ai_llm, search_chain=search, verbose=True
)
cohere_llm = Cohere(temperature=0)
search = SerpAPIWrapper()
self_ask_with_search_cohere = SelfAskWithSearchChain(
llm=cohere_llm, search_chain=search, verbose=True
)
chains = [self_ask_with_search_openai, self_ask_with_search_cohere]
names = [str(open_ai_llm), str(cohere_llm)]
model_lab = ModelLaboratory(chains, names=names)
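# Hedged follow-up, mirroring the compare() calls earlier in this document; the
# question is illustrative.
model_lab.compare("What is the hometown of the reigning men's U.S. Open champion?")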
from typing import Callable, List
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
class DialogueAgent:
def __init__(
self,
name: str,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.name = name
self.system_message = system_message
self.model = model
self.prefix = f"{self.name}: "
self.reset()
def reset(self):
self.message_history = ["Here is the conversation so far."]
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
message = self.model(
[
self.system_message,
HumanMessage(content="\n".join(self.message_history + [self.prefix])),
]
)
return message.content
def receive(self, name: str, message: str) -> None:
"""
Concatenates {message} spoken by {name} into message history
"""
self.message_history.append(f"{name}: {message}")
class DialogueSimulator:
def __init__(
self,
agents: List[DialogueAgent],
selection_function: Callable[[int, List[DialogueAgent]], int],
) -> None:
self.agents = agents
self._step = 0
self.select_next_speaker = selection_function
def reset(self):
for agent in self.agents:
agent.reset()
def inject(self, name: str, message: str):
"""
Initiates the conversation with a {message} from {name}
"""
for agent in self.agents:
agent.receive(name, message)
self._step += 1
def step(self) -> tuple[str, str]:
speaker_idx = self.select_next_speaker(self._step, self.agents)
speaker = self.agents[speaker_idx]
message = speaker.send()
for receiver in self.agents:
receiver.receive(speaker.name, message)
self._step += 1
return speaker.name, message
character_names = ["Harry Potter", "Ron Weasley", "Hermione Granger", "Argus Filch"]
storyteller_name = "Dungeon Master"
quest = "Find all of Lord Voldemort's seven horcruxes."
word_limit = 50 # word limit for task brainstorming
game_description = f"""Here is the topic for a Dungeons & Dragons game: {quest}.
The characters are: {*character_names,}.
The story is narrated by the storyteller, {storyteller_name}."""
player_descriptor_system_message = SystemMessage(
content="You can add detail to the description of a Dungeons & Dragons player."
)
def generate_character_description(character_name):
character_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(
content=f"""{game_description}
Please reply with a creative description of the character, {character_name}, in {word_limit} words or less.
Speak directly to {character_name}.
Do not add anything else."""
),
]
character_description = ChatOpenAI(temperature=1.0)(
character_specifier_prompt
).content
return character_description
def generate_character_system_message(character_name, character_description):
return SystemMessage(
content=(
f"""{game_description}
Your name is {character_name}.
Your character description is as follows: {character_description}.
You will propose actions you plan to take and {storyteller_name} will explain what happens when you take those actions.
Speak in the first person from the perspective of {character_name}.
For describing your own body movements, wrap your description in '*'.
Do not change roles!
Do not speak from the perspective of anyone else.
Remember you are {character_name}.
Stop speaking the moment you finish speaking from your perspective.
Never forget to keep your response to {word_limit} words!
Do not add anything else.
"""
)
)
character_descriptions = [
generate_character_description(character_name) for character_name in character_names
]
character_system_messages = [
generate_character_system_message(character_name, character_description)
for character_name, character_description in zip(
character_names, character_descriptions
)
]
storyteller_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(
content=f"""{game_description}
Please reply with a creative description of the storyteller, {storyteller_name}, in {word_limit} words or less.
Speak directly to {storyteller_name}.
Do not add anything else."""
),
]
storyteller_description = ChatOpenAI(temperature=1.0)(
storyteller_specifier_prompt
).content
storyteller_system_message = SystemMessage(
content=(
f"""{game_description}
You are the storyteller, {storyteller_name}.
Your description is as follows: {storyteller_description}.
The other players will propose actions to take and you will explain what happens when they take those actions.
Speak in the first person from the perspective of {storyteller_name}.
Do not change roles!
Do not speak from the perspective of anyone else.
Remember you are the storyteller, {storyteller_name}.
Stop speaking the moment you finish speaking from your perspective.
Never forget to keep your response to {word_limit} words!
Do not add anything else.
"""
)
)
print("Storyteller Description:")
print(storyteller_description)
for character_name, character_description in zip(
character_names, character_descriptions
):
print(f"{character_name} Description:")
print(character_description)
quest_specifier_prompt = [
    SystemMessage(content="You can make a task more specific."),
    HumanMessage(
        content=f"""{game_description}

        You are the storyteller, {storyteller_name}.
        Please make the quest more specific. Be creative and imaginative.
        Please reply with the specified quest in {word_limit} words or less.
        Speak directly to the characters: {*character_names,}.
        Do not add anything else."""
    ),  # HumanMessage reconstructed following the task-specifier pattern above; the source row truncates here
]
get_ipython().run_line_magic('pip', 'install --upgrade --quiet "docarray[hnswlib]"')
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import DocArrayHnswSearch
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
documents = TextLoader("../../modules/state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
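# Hedged continuation: DocArrayHnswSearch persists to a working directory and needs
# the embedding dimensionality up front (1536 matches OpenAI's ada-002 embeddings).
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = DocArrayHnswSearch.from_documents(
    docs, embeddings, work_dir="hnswlib_store/", n_dim=1536
)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)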
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai.chat_models import ChatOpenAI
model = ChatOpenAI()
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're an assistant who's good at {ability}. Respond in 20 words or fewer",
),
MessagesPlaceholder(variable_name="history"),
("human", "{input}"),
]
)
runnable = prompt | model
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
store = {}
def get_session_history(session_id: str) -> BaseChatMessageHistory:
if session_id not in store:
store[session_id] = ChatMessageHistory()
return store[session_id]
with_message_history = RunnableWithMessageHistory(
runnable,
get_session_history,
input_messages_key="input",
history_messages_key="history",
)
with_message_history.invoke(
{"ability": "math", "input": "What does cosine mean?"},
config={"configurable": {"session_id": "abc123"}},
)
with_message_history.invoke(
{"ability": "math", "input": "What?"},
config={"configurable": {"session_id": "abc123"}},
)
with_message_history.invoke(
{"ability": "math", "input": "What?"},
config={"configurable": {"session_id": "def234"}},
)
from langchain_core.runnables import ConfigurableFieldSpec
store = {}
def get_session_history(user_id: str, conversation_id: str) -> BaseChatMessageHistory:
if (user_id, conversation_id) not in store:
store[(user_id, conversation_id)] = ChatMessageHistory()
return store[(user_id, conversation_id)]
with_message_history = RunnableWithMessageHistory(
runnable,
get_session_history,
input_messages_key="input",
history_messages_key="history",
history_factory_config=[
ConfigurableFieldSpec(
id="user_id",
annotation=str,
name="User ID",
description="Unique identifier for the user.",
default="",
is_shared=True,
),
ConfigurableFieldSpec(
id="conversation_id",
annotation=str,
name="Conversation ID",
description="Unique identifier for the conversation.",
default="",
is_shared=True,
),
],
)
with_message_history.invoke(
{"ability": "math", "input": "Hello"},
config={"configurable": {"user_id": "123", "conversation_id": "1"}},
)
from langchain_core.messages import HumanMessage
from langchain_core.runnables import RunnableParallel
chain = RunnableParallel({"output_message": ChatOpenAI()})
def get_session_history(session_id: str) -> BaseChatMessageHistory:
if session_id not in store:
store[session_id] = ChatMessageHistory()
return store[session_id]
with_message_history = RunnableWithMessageHistory(
chain,
get_session_history,
output_messages_key="output_message",
)
with_message_history.invoke(
[HumanMessage(content="What did Simone de Beauvoir believe about free will")],
config={"configurable": {"session_id": "baz"}},
)
with_message_history.invoke(
[HumanMessage(content="How did this compare to Sartre")],
config={"configurable": {"session_id": "baz"}},
)
RunnableWithMessageHistory(
ChatOpenAI(),
get_session_history,
)
from operator import itemgetter
RunnableWithMessageHistory(
    itemgetter("input_messages") | ChatOpenAI(),
    get_session_history,
    input_messages_key="input_messages",
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-core databricks-vectorsearch langchain-openai tiktoken')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.document_loaders import TextLoader
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
emb_dim = len(embeddings.embed_query("hello"))
from databricks.vector_search.client import VectorSearchClient
vsc = VectorSearchClient()
vsc.create_endpoint(name="vector_search_demo_endpoint", endpoint_type="STANDARD")
vector_search_endpoint_name = "vector_search_demo_endpoint"
index_name = "ml.llm.demo_index"
index = vsc.create_direct_access_index(
endpoint_name=vector_search_endpoint_name,
index_name=index_name,
primary_key="id",
embedding_dimension=emb_dim,
embedding_vector_column="text_vector",
schema={
"id": "string",
"text": "string",
"text_vector": "array<float>",
"source": "string",
},
)
index.describe()
from langchain_community.vectorstores import DatabricksVectorSearch
dvs = DatabricksVectorSearch(
index, text_column="text", embedding=embeddings, columns=["source"]
)
dvs.add_documents(docs)
query = "What did the president say about Ketanji Brown Jackson"
dvs.similarity_search(query)
print(docs[0].page_content)
dvs_delta_sync = DatabricksVectorSearch("catalog_name.schema_name.delta_sync_index")
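# Hedged usage (assumption: a delta-sync index computes embeddings server-side, so
# no embedding model needs to be passed):
dvs_delta_sync.similarity_search(query)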
import os
import comet_llm
os.environ["LANGCHAIN_COMET_TRACING"] = "true"
comet_llm.init()
os.environ["COMET_PROJECT_NAME"] = "comet-example-langchain-tracing"
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI
llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is 2 raised to .123243 power?") # this should be traced
if "LANGCHAIN_COMET_TRACING" in os.environ:
del os.environ["LANGCHAIN_COMET_TRACING"]
from langchain.callbacks.tracers.comet import CometTracer
tracer = CometTracer()
llm = OpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=llm)
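# Hedged continuation: rebuild the agent and pass the explicit Comet tracer at run time.
agent = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is 2 raised to .123243 power?", callbacks=[tracer])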
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(docs, embeddings)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)
retriever = db.as_retriever()
docs = retriever.invoke(query)
print(docs[0].page_content)
docs_and_scores = db.similarity_search_with_score(query)
docs_and_scores[0]
embedding_vector = embeddings.embed_query(query)
docs_and_scores = db.similarity_search_by_vector(embedding_vector)
db.save_local("faiss_index")
new_db = FAISS.load_local("faiss_index", embeddings)
docs = new_db.similarity_search(query)
docs[0]
from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings
pkl = db.serialize_to_bytes() # serializes the faiss
embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
db = FAISS.deserialize_from_bytes(
embeddings=embeddings, serialized=pkl
) # Load the index
db1 = FAISS.from_texts(["foo"], embeddings)
db2 = FAISS.from_texts(["bar"], embeddings)
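# Hedged next step: FAISS stores support in-place merging, after which both texts
# are searchable from db1.
db1.merge_from(db2)
db1.similarity_search("foo")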
from langchain.evaluation import load_evaluator
evaluator = load_evaluator("criteria", criteria="conciseness")
from langchain.evaluation import EvaluatorType
evaluator = load_evaluator(EvaluatorType.CRITERIA, criteria="conciseness")
eval_result = evaluator.evaluate_strings(
prediction="What's 2+2? That's an elementary question. The answer you're looking for is that two and two is four.",
input="What's 2+2?",
)
print(eval_result)
evaluator = load_evaluator("labeled_criteria", criteria="correctness")
eval_result = evaluator.evaluate_strings(
input="What is the capital of the US?",
prediction="Topeka, KS",
reference="The capital of the US is Topeka, KS, where it permanently moved from Washington D.C. on May 16, 2023",
)
print(f'With ground truth: {eval_result["score"]}')
from langchain.evaluation import Criteria
list(Criteria)
custom_criterion = {
"numeric": "Does the output contain numeric or mathematical information?"
}
eval_chain = load_evaluator(
EvaluatorType.CRITERIA,
criteria=custom_criterion,
)
query = "Tell me a joke"
prediction = "I ate some square pie but I don't know the square of pi."
eval_result = eval_chain.evaluate_strings(prediction=prediction, input=query)
print(eval_result)
custom_criteria = {
"numeric": "Does the output contain numeric information?",
"mathematical": "Does the output contain mathematical information?",
"grammatical": "Is the output grammatically correct?",
"logical": "Is the output logical?",
}
eval_chain = load_evaluator(
EvaluatorType.CRITERIA,
criteria=custom_criteria,
)
eval_result = eval_chain.evaluate_strings(prediction=prediction, input=query)
print("Multi-criteria evaluation")
print(eval_result)
from langchain.chains.constitutional_ai.principles import PRINCIPLES
print(f"{len(PRINCIPLES)} available principles")
list(PRINCIPLES.items())
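# Hedged usage of a built-in principle as a criterion (assumes the "harmful1" key
# exists in PRINCIPLES; pick any key from the listing above).
evaluator = load_evaluator(EvaluatorType.CRITERIA, criteria=PRINCIPLES["harmful1"])
eval_result = evaluator.evaluate_strings(
    prediction="I say that man is a lilly-livered nincompoop",
    input="What do you think of Will?",
)
print(eval_result)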
from langchain.callbacks import get_openai_callback
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model_name="gpt-4")
with get_openai_callback() as cb:
result = llm.invoke("Tell me a joke")
print(cb)
with get_openai_callback() as cb:
result = llm.invoke("Tell me a joke")
result2 = llm.invoke("Tell me a joke")
print(cb.total_tokens)
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain_openai import OpenAI
tools = load_tools(["serpapi", "llm-math"], llm=llm)
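# Hedged continuation: count tokens across a whole agent run; the question is illustrative.
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
with get_openai_callback() as cb:
    response = agent.run(
        "What is the square root of the year the Eiffel Tower opened?"  # hypothetical query
    )
    print(f"Total Tokens: {cb.total_tokens}")
    print(f"Total Cost (USD): ${cb.total_cost}")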
get_ipython().system("python3 -m pip install --upgrade langchain 'deeplake[enterprise]' openai tiktoken")
import getpass
import os
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import DeepLake
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import (
CharacterTextSplitter,
RecursiveCharacterTextSplitter,
)
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
activeloop_token = getpass.getpass("Activeloop Token:")
os.environ["ACTIVELOOP_TOKEN"] = activeloop_token
os.environ["ACTIVELOOP_ORG"] = getpass.getpass("Activeloop Org:")
org_id = os.environ["ACTIVELOOP_ORG"]
embeddings = OpenAIEmbeddings()
dataset_path = "hub://" + org_id + "/data"
with open("messages.txt") as f:
state_of_the_union = f.read()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
pages = text_splitter.split_text(state_of_the_union)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
texts = text_splitter.create_documents(pages)
print(texts)
dataset_path = "hub://" + org_id + "/data"
embeddings = OpenAIEmbeddings()
db = DeepLake.from_documents(
texts, embeddings, dataset_path=dataset_path, overwrite=True
)
db = DeepLake(dataset_path=dataset_path, read_only=True, embedding=embeddings)
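# Hedged sketch: serve the read-only DeepLake store through a RetrievalQA chain;
# the question is illustrative.
qa = RetrievalQA.from_chain_type(
    llm=OpenAI(), chain_type="stuff", retriever=db.as_retriever()
)
qa.run("What do the messages discuss?")  # hypothetical query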
get_ipython().run_line_magic('pip', 'install --upgrade --quiet predictionguard langchain')
import os
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import PredictionGuard
os.environ["OPENAI_API_KEY"] = "<your OpenAI api key>"
os.environ["PREDICTIONGUARD_TOKEN"] = "<your Prediction Guard access token>"
pgllm = PredictionGuard(model="OpenAI-text-davinci-003")
pgllm("Tell me a joke")
template = """Respond to the following query based on the context.
Context: EVERY comment, DM + email suggestion has led us to this EXCITING announcement! We have officially added TWO new candle subscription box options!
Exclusive Candle Box - $80
Monthly Candle Box - $45 (NEW!)
Scent of The Month Box - $28 (NEW!)
Head to stories to get ALLL the deets on each box! BONUS: Save 50% on your first box with code 50OFF!
Query: {query}
Result: """
prompt = PromptTemplate.from_template(template)
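# Hedged continuation: run the prompt through the Prediction Guard LLM.
llm_chain = LLMChain(prompt=prompt, llm=pgllm, verbose=True)
llm_chain.predict(query="What kind of post is this?")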
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-pinecone langchain-openai langchain')
from langchain_community.document_loaders import TextLoader
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
from langchain_pinecone import PineconeVectorStore
index_name = "langchain-test-index"
docsearch = PineconeVectorStore.from_documents(docs, embeddings, index_name=index_name)
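# Hedged usage, following the similarity-search pattern used throughout this document:
query = "What did the president say about Ketanji Brown Jackson"
docs = docsearch.similarity_search(query)
print(docs[0].page_content)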
get_ipython().system('pip install --quiet langchain_experimental langchain_openai')
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
from langchain_experimental.text_splitter import SemanticChunker
from langchain_openai.embeddings import OpenAIEmbeddings
text_splitter = SemanticChunker(OpenAIEmbeddings())
docs = text_splitter.create_documents([state_of_the_union])
print(docs[0].page_content)
text_splitter = SemanticChunker(
OpenAIEmbeddings(), breakpoint_threshold_type="percentile"
)
docs = text_splitter.create_documents([state_of_the_union])
print(docs[0].page_content)
print(len(docs))
text_splitter = SemanticChunker(
    OpenAIEmbeddings(),
    breakpoint_threshold_type="standard_deviation",  # reconstructed: the source truncates after the embeddings argument
)
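# Hedged usage, mirroring the create_documents calls above:
docs = text_splitter.create_documents([state_of_the_union])
print(docs[0].page_content)
print(len(docs))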
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lark clickhouse-connect')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
os.environ["MYSCALE_HOST"] = getpass.getpass("MyScale URL:")
os.environ["MYSCALE_PORT"] = getpass.getpass("MyScale Port:")
os.environ["MYSCALE_USERNAME"] = getpass.getpass("MyScale Username:")
os.environ["MYSCALE_PASSWORD"] = getpass.getpass("MyScale Password:")
from langchain_community.vectorstores import MyScale
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
docs = [
Document(
page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose",
metadata={"date": "1993-07-02", "rating": 7.7, "genre": ["science fiction"]},
),
Document(
page_content="Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
metadata={"date": "2010-12-30", "director": "Christopher Nolan", "rating": 8.2},
),
Document(
page_content="A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
metadata={"date": "2006-04-23", "director": "Satoshi Kon", "rating": 8.6},
),
Document(
page_content="A bunch of normal-sized women are supremely wholesome and some men pine after them",
metadata={"date": "2019-08-22", "director": "Greta Gerwig", "rating": 8.3},
),
Document(
page_content="Toys come alive and have a blast doing so",
metadata={"date": "1995-02-11", "genre": ["animated"]},
),
Document(
page_content="Three men walk into the Zone, three men walk out of the Zone",
metadata={
"date": "1979-09-10",
"director": "Andrei Tarkovsky",
"genre": ["science fiction", "adventure"],
"rating": 9.9,
},
),
]
vectorstore = MyScale.from_documents(
docs,
embeddings,
)
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_openai import OpenAI
metadata_field_info = [
AttributeInfo(
name="genre",
description="The genres of the movie",
type="list[string]",
),
    AttributeInfo(
        name="length(genre)",
        description="The number of genres of the movie",  # description/type reconstructed; the source row truncates here
        type="integer",
    ),
]
get_ipython().run_line_magic('pip', 'install -qU chromadb langchain langchain-community langchain-openai')
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = TextLoader("../../state_of_the_union.txt")
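# Hedged continuation, following the load-split-embed pattern used throughout:
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
db = Chroma.from_documents(docs, OpenAIEmbeddings())
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)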
get_ipython().run_line_magic('pip', 'install --upgrade --quiet duckduckgo-search')
from langchain.tools import DuckDuckGoSearchRun
search = DuckDuckGoSearchRun()
search.run("Obama's first name?")
from langchain.tools import DuckDuckGoSearchResults
search = DuckDuckGoSearchResults()
search.run("Obama")
search = DuckDuckGoSearchResults(backend="news")
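# Hedged usage: the news backend is queried the same way as the default backend.
search.run("Obama")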
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-nvidia-ai-endpoints')
import getpass
import os
if not os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"):
nvapi_key = getpass.getpass("Enter your NVIDIA API key: ")
assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key"
os.environ["NVIDIA_API_KEY"] = nvapi_key
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm = ChatNVIDIA(model="mixtral_8x7b")
result = llm.invoke("Write a ballad about LangChain.")
print(result.content)
print(llm.batch(["What's 2*3?", "What's 2*6?"]))
for chunk in llm.stream("How far can a seagull fly in one day?"):
print(chunk.content, end="|")
async for chunk in llm.astream(
"How long does it take for monarch butterflies to migrate?"
):
print(chunk.content, end="|")
ChatNVIDIA.get_available_models()
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_nvidia_ai_endpoints import ChatNVIDIA
prompt = ChatPromptTemplate.from_messages(
[("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")]
)
chain = prompt | ChatNVIDIA(model="llama2_13b") | StrOutputParser()
for txt in chain.stream({"input": "What's your name?"}):
print(txt, end="")
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are an expert coding AI. Respond only in valid python; no narration whatsoever.",
),
("user", "{input}"),
]
)
chain = prompt | ChatNVIDIA(model="llama2_code_70b") | StrOutputParser()
for txt in chain.stream({"input": "How do I solve this fizz buzz problem?"}):
print(txt, end="")
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm = ChatNVIDIA(model="nemotron_steerlm_8b")
complex_result = llm.invoke(
"What's a PB&J?", labels={"creativity": 0, "complexity": 3, "verbosity": 0}
)
print("Un-creative\n")
print(complex_result.content)
print("\n\nCreative\n")
creative_result = llm.invoke(
"What's a PB&J?", labels={"creativity": 9, "complexity": 3, "verbosity": 9}
)
print(creative_result.content)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_nvidia_ai_endpoints import ChatNVIDIA
prompt = ChatPromptTemplate.from_messages(
[("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")]
)
chain = (
prompt
| ChatNVIDIA(model="nemotron_steerlm_8b").bind(
labels={"creativity": 9, "complexity": 0, "verbosity": 9}
)
| StrOutputParser()
)
for txt in chain.stream({"input": "Why is a PB&J?"}):
print(txt, end="")
import IPython
import requests
image_url = "https://www.nvidia.com/content/dam/en-zz/Solutions/research/ai-playground/nvidia-picasso-3c33-p@2x.jpg" ## Large Image
image_content = requests.get(image_url).content
IPython.display.Image(image_content)
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm = ChatNVIDIA(model="playground_neva_22b")
from langchain_core.messages import HumanMessage
llm.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "Describe this image:"},
{"type": "image_url", "image_url": {"url": image_url}},
]
)
]
)
from langchain_core.messages import HumanMessage
llm.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "Describe this image:"},
{"type": "image_url", "image_url": {"url": image_url}},
]
)
],
labels={"creativity": 0, "quality": 9, "complexity": 0, "verbosity": 0},
)
import IPython
import requests
image_url = "https://picsum.photos/seed/kitten/300/200"
image_content = requests.get(image_url).content
IPython.display.Image(image_content)
import base64
from langchain_core.messages import HumanMessage
b64_string = base64.b64encode(image_content).decode("utf-8")
llm.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "Describe this image:"},
{
"type": "image_url",
"image_url": {"url": f"data:image/png;base64,{b64_string}"},
},
]
)
]
)
base64_with_mime_type = f"data:image/png;base64,{b64_string}"
llm.invoke(f'What\'s in this image?\n<img src="{base64_with_mime_type}" />')
from langchain_nvidia_ai_endpoints import ChatNVIDIA
kosmos = ChatNVIDIA(model="kosmos_2")
from langchain_core.messages import HumanMessage
def drop_streaming_key(d):
"""Takes in payload dictionary, outputs new payload dictionary"""
if "stream" in d:
d.pop("stream")
return d
kosmos = | ChatNVIDIA(model="kosmos_2") | langchain_nvidia_ai_endpoints.ChatNVIDIA |
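# Sketch (assumption): the NVIDIA example attaches drop_streaming_key as a
# payload hook so the non-streaming Kosmos endpoint never receives a "stream"
# field; `client.payload_fn` is an internal attribute and may vary by version.
kosmos.client.payload_fn = drop_streaming_key
kosmos.invoke(
    [
        HumanMessage(
            content=[
                {"type": "text", "text": "Describe this image:"},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64_string}"}},
            ]
        )
    ]
)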
get_ipython().run_line_magic('pip', 'install --upgrade --quiet comet_ml langchain langchain-openai google-search-results spacy textstat pandas')
import sys
get_ipython().system('{sys.executable} -m spacy download en_core_web_sm')
import comet_ml
comet_ml.init(project_name="comet-example-langchain")
import os
os.environ["OPENAI_API_KEY"] = "..."
os.environ["SERPAPI_API_KEY"] = "..."
from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler
from langchain_openai import OpenAI
comet_callback = CometCallbackHandler(
project_name="comet-example-langchain",
complexity_metrics=True,
stream_logs=True,
tags=["llm"],
visualizations=["dep"],
)
callbacks = [StdOutCallbackHandler(), comet_callback]
llm = OpenAI(temperature=0.9, callbacks=callbacks, verbose=True)
llm_result = llm.generate(["Tell me a joke", "Tell me a poem", "Tell me a fact"] * 3)
print("LLM result", llm_result)
comet_callback.flush_tracker(llm, finish=True)
from langchain.callbacks import CometCallbackHandler, StdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
comet_callback = CometCallbackHandler(
complexity_metrics=True,
project_name="comet-example-langchain",
stream_logs=True,
tags=["synopsis-chain"],
)
callbacks = [StdOutCallbackHandler(), comet_callback]
llm = OpenAI(temperature=0.9, callbacks=callbacks)
template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
Title: {title}
Playwright: This is a synopsis for the above play:"""
prompt_template = PromptTemplate(input_variables=["title"], template=template)
synopsis_chain = | LLMChain(llm=llm, prompt=prompt_template, callbacks=callbacks) | langchain.chains.LLMChain |
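# Sketch of how the synopsis chain is exercised and logged to Comet (the test
# title is illustrative).
test_prompts = [{"title": "Documentary about Bigfoot in Paris"}]
print(synopsis_chain.apply(test_prompts))
comet_callback.flush_tracker(synopsis_chain, finish=True)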
get_ipython().run_line_magic('pip', 'install --upgrade --quiet promptlayer --upgrade')
import promptlayer # Don't forget this 🍰
from langchain.callbacks import PromptLayerCallbackHandler
from langchain.schema import (
HumanMessage,
)
from langchain_openai import ChatOpenAI
chat_llm = ChatOpenAI(
temperature=0,
callbacks=[ | PromptLayerCallbackHandler(pl_tags=["chatopenai"]) | langchain.callbacks.PromptLayerCallbackHandler |
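# Sketch: closing the truncated construction above and sending one message
# through the PromptLayer-tracked chat model.
chat_llm = ChatOpenAI(
    temperature=0,
    callbacks=[PromptLayerCallbackHandler(pl_tags=["chatopenai"])],
)
print(chat_llm([HumanMessage(content="What comes after 1,2,3 ?")]))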
get_ipython().run_cell_magic('writefile', 'discord_chats.txt', "talkingtower — 08/15/2023 11:10 AM\nLove music! Do you like jazz?\nreporterbob — 08/15/2023 9:27 PM\nYes! Jazz is fantastic. Ever heard this one?\nWebsite\nListen to classic jazz track...\n\ntalkingtower — Yesterday at 5:03 AM\nIndeed! Great choice. 🎷\nreporterbob — Yesterday at 5:23 AM\nThanks! How about some virtual sightseeing?\nWebsite\nVirtual tour of famous landmarks...\n\ntalkingtower — Today at 2:38 PM\nSounds fun! Let's explore.\nreporterbob — Today at 2:56 PM\nEnjoy the tour! See you around.\ntalkingtower — Today at 3:00 PM\nThank you! Goodbye! 👋\nreporterbob — Today at 3:02 PM\nFarewell! Happy exploring.\n")
import logging
import re
from typing import Iterator, List
from langchain_community.chat_loaders import base as chat_loaders
from langchain_core.messages import BaseMessage, HumanMessage
logger = logging.getLogger()
class DiscordChatLoader(chat_loaders.BaseChatLoader):
def __init__(self, path: str):
"""
Initialize the Discord chat loader.
Args:
path: Path to the exported Discord chat text file.
"""
self.path = path
self._message_line_regex = re.compile(
r"(.+?) β (\w{3,9} \d{1,2}(?:st|nd|rd|th)?(?:, \d{4})? \d{1,2}:\d{2} (?:AM|PM)|Today at \d{1,2}:\d{2} (?:AM|PM)|Yesterday at \d{1,2}:\d{2} (?:AM|PM))", # noqa
flags=re.DOTALL,
)
def _load_single_chat_session_from_txt(
self, file_path: str
) -> chat_loaders.ChatSession:
"""
Load a single chat session from a text file.
Args:
file_path: Path to the text file containing the chat messages.
Returns:
A `ChatSession` object containing the loaded chat messages.
"""
with open(file_path, "r", encoding="utf-8") as file:
lines = file.readlines()
results: List[BaseMessage] = []
current_sender = None
current_timestamp = None
current_content = []
for line in lines:
if re.match(
r".+? β (\d{2}/\d{2}/\d{4} \d{1,2}:\d{2} (?:AM|PM)|Today at \d{1,2}:\d{2} (?:AM|PM)|Yesterday at \d{1,2}:\d{2} (?:AM|PM))", # noqa
line,
):
if current_sender and current_content:
results.append(
HumanMessage(
content="".join(current_content).strip(),
additional_kwargs={
"sender": current_sender,
"events": [{"message_time": current_timestamp}],
},
)
)
current_sender, current_timestamp = line.split(" — ")[:2]
current_content = [
line[len(current_sender) + len(current_timestamp) + 4 :].strip()
]
elif re.match(r"\[\d{1,2}:\d{2} (?:AM|PM)\]", line.strip()):
results.append(
HumanMessage(
content="".join(current_content).strip(),
additional_kwargs={
"sender": current_sender,
"events": [{"message_time": current_timestamp}],
},
)
)
current_timestamp = line.strip()[1:-1]
current_content = []
else:
current_content.append("\n" + line.strip())
if current_sender and current_content:
results.append(
HumanMessage(
content="".join(current_content).strip(),
additional_kwargs={
"sender": current_sender,
"events": [{"message_time": current_timestamp}],
},
)
)
return chat_loaders.ChatSession(messages=results)
def lazy_load(self) -> Iterator[chat_loaders.ChatSession]:
"""
Lazy load the messages from the chat file and yield them in the required format.
Yields:
A `ChatSession` object containing the loaded chat messages.
"""
yield self._load_single_chat_session_from_txt(self.path)
loader = DiscordChatLoader(
path="./discord_chats.txt",
)
from typing import List
from langchain_community.chat_loaders.base import ChatSession
from langchain_community.chat_loaders.utils import (
map_ai_messages,
merge_chat_runs,
)
raw_messages = loader.lazy_load()
merged_messages = merge_chat_runs(raw_messages)
messages: List[ChatSession] = list(
map_ai_messages(merged_messages, sender="talkingtower")
)
messages
from langchain_openai import ChatOpenAI
llm = | ChatOpenAI() | langchain_openai.ChatOpenAI |
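# Sketch: stream a continuation of the loaded conversation (messages[0] is the
# merged ChatSession built above).
for chunk in llm.stream(messages[0]["messages"]):
    print(chunk.content, end="")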
from typing import List, Optional
from langchain.chains.openai_tools import create_extraction_chain_pydantic
from langchain_core.pydantic_v1 import BaseModel
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-3.5-turbo-1106")
class Person(BaseModel):
"""Information about people to extract."""
name: str
age: Optional[int] = None
chain = | create_extraction_chain_pydantic(Person, model) | langchain.chains.openai_tools.create_extraction_chain_pydantic |
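# Usage sketch (the input text is illustrative):
chain.invoke({"input": "jane is 2 and bob is 3"})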
get_ipython().run_line_magic('pip', 'install --upgrade --quiet timescale-vector')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken')
import os
from dotenv import find_dotenv, load_dotenv
_ = load_dotenv(find_dotenv())
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
from typing import Tuple
from datetime import datetime, timedelta
from langchain.docstore.document import Document
from langchain_community.document_loaders import TextLoader
from langchain_community.document_loaders.json_loader import JSONLoader
from langchain_community.vectorstores.timescalevector import TimescaleVector
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../../extras/modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
SERVICE_URL = os.environ["TIMESCALE_SERVICE_URL"]
COLLECTION_NAME = "state_of_the_union_test"
db = TimescaleVector.from_documents(
embedding=embeddings,
documents=docs,
collection_name=COLLECTION_NAME,
service_url=SERVICE_URL,
)
query = "What did the president say about Ketanji Brown Jackson"
docs_with_score = db.similarity_search_with_score(query)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print(doc.page_content)
print("-" * 80)
retriever = db.as_retriever()
print(retriever)
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.1, model="gpt-3.5-turbo-16k")
from langchain.chains import RetrievalQA
qa_stuff = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
verbose=True,
)
query = "What did the president say about Ketanji Brown Jackson?"
response = qa_stuff.run(query)
print(response)
from timescale_vector import client
def create_uuid(date_string: str):
if date_string is None:
return None
time_format = "%a %b %d %H:%M:%S %Y %z"
datetime_obj = datetime.strptime(date_string, time_format)
uuid = client.uuid_from_time(datetime_obj)
return str(uuid)
def split_name(input_string: str) -> Tuple[str, str]:
if input_string is None:
return None, None
start = input_string.find("<")
end = input_string.find(">")
name = input_string[:start].strip()
email = input_string[start + 1 : end].strip()
return name, email
def create_date(input_string: str) -> datetime:
if input_string is None:
return None
month_dict = {
"Jan": "01",
"Feb": "02",
"Mar": "03",
"Apr": "04",
"May": "05",
"Jun": "06",
"Jul": "07",
"Aug": "08",
"Sep": "09",
"Oct": "10",
"Nov": "11",
"Dec": "12",
}
components = input_string.split()
day = components[2]
month = month_dict[components[1]]
year = components[4]
time = components[3]
timezone_offset_minutes = int(components[5]) # Convert the offset to minutes
timezone_hours = timezone_offset_minutes // 60 # Calculate the hours
timezone_minutes = timezone_offset_minutes % 60 # Calculate the remaining minutes
timestamp_tz_str = (
f"{year}-{month}-{day} {time}+{timezone_hours:02}{timezone_minutes:02}"
)
return timestamp_tz_str
def extract_metadata(record: dict, metadata: dict) -> dict:
record_name, record_email = split_name(record["author"])
metadata["id"] = create_uuid(record["date"])
metadata["date"] = create_date(record["date"])
metadata["author_name"] = record_name
metadata["author_email"] = record_email
metadata["commit_hash"] = record["commit"]
return metadata
get_ipython().system('curl -O https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json')
FILE_PATH = "../../../../../ts_git_log.json"
loader = JSONLoader(
file_path=FILE_PATH,
jq_schema=".commit_history[]",
text_content=False,
metadata_func=extract_metadata,
)
documents = loader.load()
documents = [doc for doc in documents if doc.metadata["date"] is not None]
print(documents[0])
NUM_RECORDS = 500
documents = documents[:NUM_RECORDS]
text_splitter = CharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
docs = text_splitter.split_documents(documents)
COLLECTION_NAME = "timescale_commits"
embeddings = OpenAIEmbeddings()
db = TimescaleVector.from_documents(
embedding=embeddings,
ids=[doc.metadata["id"] for doc in docs],
documents=docs,
collection_name=COLLECTION_NAME,
service_url=SERVICE_URL,
time_partition_interval=timedelta(days=7),
)
start_dt = datetime(2023, 8, 1, 22, 10, 35) # Start date = 1 August 2023, 22:10:35
end_dt = datetime(2023, 8, 30, 22, 10, 35) # End date = 30 August 2023, 22:10:35
td = timedelta(days=7) # Time delta = 7 days
query = "What's new with TimescaleDB functions?"
docs_with_score = db.similarity_search_with_score(
query, start_date=start_dt, end_date=end_dt
)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
docs_with_score = db.similarity_search_with_score(
query, start_date=start_dt, time_delta=td
)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
docs_with_score = db.similarity_search_with_score(query, end_date=end_dt, time_delta=td)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
docs_with_score = db.similarity_search_with_score(query, start_date=start_dt)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
docs_with_score = db.similarity_search_with_score(query, end_date=end_dt)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
retriever = db.as_retriever(search_kwargs={"start_date": start_dt, "end_date": end_dt})
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.1, model="gpt-3.5-turbo-16k")
from langchain.chains import RetrievalQA
qa_stuff = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
verbose=True,
)
query = (
"What's new with the timescaledb functions? Tell me when these changes were made."
)
response = qa_stuff.run(query)
print(response)
COLLECTION_NAME = "timescale_commits"
embeddings = OpenAIEmbeddings()
db = TimescaleVector(
collection_name=COLLECTION_NAME,
service_url=SERVICE_URL,
embedding_function=embeddings,
)
db.create_index()
db.drop_index()
db.create_index(index_type="tsv", max_alpha=1.0, num_neighbors=50)
db.drop_index()
db.create_index(index_type="hnsw", m=16, ef_construction=64)
db.drop_index()
db.create_index(index_type="ivfflat", num_lists=20, num_records=1000)
db.drop_index()
db.create_index()
COLLECTION_NAME = "timescale_commits"
vectorstore = TimescaleVector(
embedding_function=OpenAIEmbeddings(),
collection_name=COLLECTION_NAME,
service_url=SERVICE_URL,
)
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_openai import OpenAI
metadata_field_info = [
AttributeInfo(
name="id",
description="A UUID v1 generated from the date of the commit",
type="uuid",
),
AttributeInfo(
name="date",
description="The date of the commit in timestamptz format",
type="timestamptz",
),
AttributeInfo(
name="author_name",
description="The name of the author of the commit",
type="string",
),
AttributeInfo(
name="author_email",
description="The email address of the author of the commit",
type="string",
),
]
document_content_description = "The git log commit summary containing the commit hash, author, date of commit, change summary and change details"
llm = OpenAI(temperature=0)
retriever = SelfQueryRetriever.from_llm(
llm,
vectorstore,
document_content_description,
metadata_field_info,
enable_limit=True,
verbose=True,
)
retriever.get_relevant_documents("What are improvements made to continuous aggregates?")
retriever.get_relevant_documents("What commits did Sven Klemm add?")
retriever.get_relevant_documents(
"What commits about timescaledb_functions did Sven Klemm add?"
)
retriever.get_relevant_documents("What commits were added in July 2023?")
retriever.get_relevant_documents(
"What are two commits about hierarchical continuous aggregates?"
)
COLLECTION_NAME = "timescale_commits"
embeddings = | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet meilisearch')
import getpass
import os
os.environ["MEILI_HTTP_ADDR"] = getpass.getpass("Meilisearch HTTP address and port:")
os.environ["MEILI_MASTER_KEY"] = getpass.getpass("Meilisearch API Key:")
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.vectorstores import Meilisearch
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
embeddings = OpenAIEmbeddings()
with open("../../modules/state_of_the_union.txt") as f:
state_of_the_union = f.read()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_text(state_of_the_union)
vector_store = Meilisearch.from_texts(texts=texts, embedding=embeddings)
from langchain_community.document_loaders import TextLoader
loader = | TextLoader("../../modules/state_of_the_union.txt") | langchain_community.document_loaders.TextLoader |
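# Sketch: index the loaded documents and query them, following the Meilisearch
# example above.
documents = loader.load()
vector_store = Meilisearch.from_documents(documents=documents, embedding=embeddings)
query = "What did the president say about Ketanji Brown Jackson"
print(vector_store.similarity_search(query)[0].page_content)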
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai faiss-cpu tiktoken')
from langchain.prompts import ChatPromptTemplate
from langchain.vectorstores import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
vectorstore = FAISS.from_texts(
["harrison worked at kensho"], embedding=OpenAIEmbeddings()
)
retriever = vectorstore.as_retriever()
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI()
chain = (
{"context": retriever, "question": | RunnablePassthrough() | langchain_core.runnables.RunnablePassthrough |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet wikipedia')
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_openai import ChatOpenAI
api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)
tool = WikipediaQueryRun(api_wrapper=api_wrapper)
tools = [tool]
prompt = hub.pull("hwchase17/react")
llm = | ChatOpenAI(temperature=0) | langchain_openai.ChatOpenAI |
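# Sketch: assemble and run the ReAct agent over the Wikipedia tool (the input
# question is illustrative).
agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "What is LangChain?"})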
import os
os.environ["EXA_API_KEY"] = "..."
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-exa')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_exa import ExaSearchRetriever, TextContentsOptions
from langchain_openai import ChatOpenAI
retriever = ExaSearchRetriever(
k=5, text_contents_options=TextContentsOptions(max_length=200)
)
prompt = PromptTemplate.from_template(
"""Answer the following query based on the following context:
query: {query}
<context>
{context}
</context"""
)
llm = ChatOpenAI()
chain = (
RunnableParallel({"context": retriever, "query": RunnablePassthrough()})
| prompt
| llm
)
chain.invoke("When is the best time to visit japan?")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-exa')
from exa_py import Exa
from langchain.agents import tool
exa = Exa(api_key=os.environ["EXA_API_KEY"])
@tool
def search(query: str):
"""Search for a webpage based on the query."""
return exa.search(f"{query}", use_autoprompt=True, num_results=5)
@tool
def find_similar(url: str):
"""Search for webpages similar to a given URL.
The url passed in should be a URL returned from `search`.
"""
return exa.find_similar(url, num_results=5)
@tool
def get_contents(ids: list[str]):
"""Get the contents of a webpage.
The ids passed in should be a list of ids returned from `search`.
"""
return exa.get_contents(ids)
tools = [search, get_contents, find_similar]
from langchain.agents import AgentExecutor, OpenAIFunctionsAgent
from langchain_core.messages import SystemMessage
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0)
system_message = SystemMessage(
content="You are a web researcher who answers user questions by looking up information on the internet and retrieving contents of helpful documents. Cite your sources."
)
agent_prompt = | OpenAIFunctionsAgent.create_prompt(system_message) | langchain.agents.OpenAIFunctionsAgent.create_prompt |
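# Sketch: wrap the prompt in an OpenAI-functions agent and run a research
# query (the query is illustrative).
agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=agent_prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.run("Summarize for me a fascinating article about cats.")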
from langchain_experimental.llm_bash.base import LLMBashChain
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
text = "Please write a bash script that prints 'Hello World' to the console."
bash_chain = LLMBashChain.from_llm(llm, verbose=True)
bash_chain.run(text)
from langchain.prompts.prompt import PromptTemplate
from langchain_experimental.llm_bash.prompt import BashOutputParser
_PROMPT_TEMPLATE = """If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put "#!/bin/bash" in your answer. Make sure to reason step by step, using this format:
Question: "copy the files in the directory named 'target' into a new directory at the same level as target called 'myNewDirectory'"
I need to take the following actions:
- List all files in the directory
- Create a new directory
- Copy the files from the first directory into the second directory
```bash
ls
mkdir myNewDirectory
cp -r target/* myNewDirectory
```
Do not use 'echo' when writing the script.
That is the format. Begin!
Question: {question}"""
PROMPT = PromptTemplate(
input_variables=["question"],
template=_PROMPT_TEMPLATE,
output_parser= | BashOutputParser() | langchain_experimental.llm_bash.prompt.BashOutputParser |
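# Sketch: once the PromptTemplate call above is closed, the custom prompt is
# handed back to LLMBashChain.
bash_chain = LLMBashChain.from_llm(llm, prompt=PROMPT, verbose=True)
bash_chain.run(text)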
get_ipython().run_line_magic('pip', 'install --upgrade --quiet rapidfuzz')
from langchain.evaluation import load_evaluator
evaluator = load_evaluator("string_distance")
evaluator.evaluate_strings(
prediction="The job is completely done.",
reference="The job is done",
)
evaluator.evaluate_strings(
prediction="The job is done.",
reference="The job isn't done",
)
from langchain.evaluation import StringDistance
list(StringDistance)
jaro_evaluator = | load_evaluator("string_distance", distance=StringDistance.JARO) | langchain.evaluation.load_evaluator |
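# Usage sketch: the Jaro evaluator is scored the same way as the default
# string-distance evaluator above.
jaro_evaluator.evaluate_strings(
    prediction="The job is completely done.",
    reference="The job is done",
)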
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
llm = OpenAI(temperature=0)
from pathlib import Path
relevant_parts = []
for p in Path(".").absolute().parts:
relevant_parts.append(p)
if relevant_parts[-3:] == ["langchain", "docs", "modules"]:
break
doc_path = str(Path(*relevant_parts) / "state_of_the_union.txt")
from langchain_community.document_loaders import TextLoader
loader = TextLoader(doc_path)
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embeddings, collection_name="state-of-union")
state_of_union = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=docsearch.as_retriever()
)
from langchain_community.document_loaders import WebBaseLoader
loader = WebBaseLoader("https://beta.ruff.rs/docs/faq/")
docs = loader.load()
ruff_texts = text_splitter.split_documents(docs)
ruff_db = Chroma.from_documents(ruff_texts, embeddings, collection_name="ruff")
ruff = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=ruff_db.as_retriever()
)
from langchain.agents import AgentType, Tool, initialize_agent
from langchain_openai import OpenAI
tools = [
Tool(
name="State of Union QA System",
func=state_of_union.run,
description="useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.",
),
Tool(
name="Ruff QA System",
func=ruff.run,
description="useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.",
),
]
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run(
"What did biden say about ketanji brown jackson in the state of the union address?"
)
agent.run("Why use ruff over flake8?")
tools = [
Tool(
name="State of Union QA System",
func=state_of_union.run,
description="useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.",
return_direct=True,
),
Tool(
name="Ruff QA System",
func=ruff.run,
description="useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.",
return_direct=True,
),
]
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run(
"What did biden say about ketanji brown jackson in the state of the union address?"
)
agent.run("Why use ruff over flake8?")
tools = [
Tool(
name="State of Union QA System",
func=state_of_union.run,
description="useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question, not referencing any obscure pronouns from the conversation before.",
),
| Tool(
name="Ruff QA System",
func=ruff.run,
description="useful for when you need to answer questions about ruff (a python linter) | langchain.agents.Tool |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-core langchain langchain-openai')
from langchain.utils.math import cosine_similarity
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
physics_template = """You are a very smart physics professor. \
You are great at answering questions about physics in a concise and easy to understand manner. \
When you don't know the answer to a question you admit that you don't know.
Here is a question:
{query}"""
math_template = """You are a very good mathematician. You are great at answering math questions. \
You are so good because you are able to break down hard problems into their component parts, \
answer the component parts, and then put them together to answer the broader question.
Here is a question:
{query}"""
embeddings = OpenAIEmbeddings()
prompt_templates = [physics_template, math_template]
prompt_embeddings = embeddings.embed_documents(prompt_templates)
def prompt_router(input):
query_embedding = embeddings.embed_query(input["query"])
similarity = cosine_similarity([query_embedding], prompt_embeddings)[0]
most_similar = prompt_templates[similarity.argmax()]
print("Using MATH" if most_similar == math_template else "Using PHYSICS")
return PromptTemplate.from_template(most_similar)
chain = (
{"query": RunnablePassthrough()}
| RunnableLambda(prompt_router)
| | ChatOpenAI() | langchain_openai.ChatOpenAI |
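# Sketch: completed, the semantic router pipes the selected prompt into the
# model and parses the text out.
chain = (
    {"query": RunnablePassthrough()}
    | RunnableLambda(prompt_router)
    | ChatOpenAI()
    | StrOutputParser()
)
print(chain.invoke("What's a black hole"))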
from ragatouille import RAGPretrainedModel
RAG = RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0")
import requests
def get_wikipedia_page(title: str):
"""
Retrieve the full text content of a Wikipedia page.
:param title: str - Title of the Wikipedia page.
:return: str - Full text content of the page as raw string.
"""
URL = "https://en.wikipedia.org/w/api.php"
params = {
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
}
headers = {"User-Agent": "RAGatouille_tutorial/0.0.1 (ben@clavie.eu)"}
response = requests.get(URL, params=params, headers=headers)
data = response.json()
page = next(iter(data["query"]["pages"].values()))
return page["extract"] if "extract" in page else None
full_document = get_wikipedia_page("Hayao_Miyazaki")
RAG.index(
collection=[full_document],
index_name="Miyazaki-123",
max_document_length=180,
split_documents=True,
)
results = RAG.search(query="What animation studio did Miyazaki found?", k=3)
results
retriever = RAG.as_langchain_retriever(k=3)
retriever.invoke("What animation studio did Miyazaki found?")
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_template(
"""Answer the following question based only on the provided context:
<context>
{context}
</context>
Question: {input}"""
)
llm = | ChatOpenAI() | langchain_openai.ChatOpenAI |
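# Sketch: combine the ColBERT retriever with a stuff-documents chain, per the
# retrieval-chain helpers imported above.
document_chain = create_stuff_documents_chain(llm, prompt)
retrieval_chain = create_retrieval_chain(retriever, document_chain)
retrieval_chain.invoke({"input": "What animation studio did Miyazaki found?"})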
get_ipython().system('pip install -U oci')
from langchain_community.llms import OCIGenAI
llm = OCIGenAI(
model_id="MY_MODEL",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="MY_OCID",
)
response = llm.invoke("Tell me one fact about earth", temperature=0.7)
print(response)
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate
llm = OCIGenAI(
model_id="MY_MODEL",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="MY_OCID",
auth_type="SECURITY_TOKEN",
auth_profile="MY_PROFILE", # replace with your profile name
model_kwargs={"temperature": 0.7, "top_p": 0.75, "max_tokens": 200},
)
prompt = | PromptTemplate(input_variables=["query"], template="{query}") | langchain_core.prompts.PromptTemplate |
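# Sketch: run the pass-through prompt against the OCI model (the question is
# illustrative).
llm_chain = LLMChain(llm=llm, prompt=prompt)
print(llm_chain.invoke("what is the capital of france?"))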
get_ipython().run_line_magic('pip', 'install --upgrade --quiet gpt4all > /dev/null')
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import GPT4All
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
local_path = (
"./models/ggml-gpt4all-l13b-snoozy.bin" # replace with your desired local file path
)
callbacks = [ | StreamingStdOutCallbackHandler() | langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler |
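# Sketch: close the callbacks list and wire GPT4All into the chain; local_path
# must point at downloaded weights.
llm = GPT4All(model=local_path, callbacks=callbacks, verbose=True)
llm_chain = LLMChain(prompt=prompt, llm=llm)
llm_chain.run("What NFL team won the Super Bowl in the year Justin Bieber was born?")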
get_ipython().run_line_magic('pip', 'install --upgrade --quiet runhouse')
import runhouse as rh
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import SelfHostedHuggingFaceLLM, SelfHostedPipeline
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1", use_spot=False)
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm = SelfHostedHuggingFaceLLM(
model_id="gpt2", hardware=gpu, model_reqs=["pip:./", "transformers", "torch"]
)
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"
llm_chain.run(question)
llm = SelfHostedHuggingFaceLLM(
model_id="google/flan-t5-small",
task="text2text-generation",
hardware=gpu,
)
llm("What is the capital of Germany?")
def load_pipeline():
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
pipeline,
)
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10
)
return pipe
def inference_fn(pipeline, prompt, stop=None):
return pipeline(prompt)[0]["generated_text"][len(prompt) :]
llm = SelfHostedHuggingFaceLLM(
model_load_fn=load_pipeline, hardware=gpu, inference_fn=inference_fn
)
llm("Who is the current US president?")
pipeline = load_pipeline()
llm = SelfHostedPipeline.from_pipeline(
pipeline=pipeline, hardware=gpu, model_reqs=["pip:./", "transformers", "torch"]
)
import pickle
rh.blob(pickle.dumps(pipeline), path="models/pipeline.pkl").save().to(
gpu, path="models"
)
llm = | SelfHostedPipeline.from_pipeline(pipeline="models/pipeline.pkl", hardware=gpu) | langchain_community.llms.SelfHostedPipeline.from_pipeline |
from langchain.agents import Tool
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from pydantic import BaseModel, Field
class DocumentInput(BaseModel):
question: str = Field()
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
tools = []
files = [
{
"name": "alphabet-earnings",
"path": "/Users/harrisonchase/Downloads/2023Q1_alphabet_earnings_release.pdf",
},
{
"name": "tesla-earnings",
"path": "/Users/harrisonchase/Downloads/TSLA-Q1-2023-Update.pdf",
},
]
for file in files:
loader = PyPDFLoader(file["path"])
pages = loader.load_and_split()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(pages)
embeddings = OpenAIEmbeddings()
retriever = FAISS.from_documents(docs, embeddings).as_retriever()
tools.append(
Tool(
args_schema=DocumentInput,
name=file["name"],
description=f"useful when you want to answer questions about {file['name']}",
func= | RetrievalQA.from_chain_type(llm=llm, retriever=retriever) | langchain.chains.RetrievalQA.from_chain_type |
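# Sketch: once the truncated append above is closed, an OpenAI-functions agent
# can compare the two filings (the question is illustrative).
from langchain.agents import AgentType, initialize_agent
agent = initialize_agent(
    agent=AgentType.OPENAI_FUNCTIONS, tools=tools, llm=llm, verbose=True
)
agent({"input": "did alphabet or tesla have more revenue?"})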
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
runnable = RunnableParallel(
passed=RunnablePassthrough(),
extra=RunnablePassthrough.assign(mult=lambda x: x["num"] * 3),
modified=lambda x: x["num"] + 1,
)
runnable.invoke({"num": 1})
from langchain_community.vectorstores import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
vectorstore = FAISS.from_texts(
["harrison worked at kensho"], embedding=OpenAIEmbeddings()
)
retriever = vectorstore.as_retriever()
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI()
retrieval_chain = (
{"context": retriever, "question": RunnablePassthrough()}
| prompt
| model
| | StrOutputParser() | langchain_core.output_parsers.StrOutputParser |
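# Usage sketch, once the trailing parenthesis of the chain is closed:
retrieval_chain.invoke("where did harrison work?")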
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
template = """You are a chatbot having a conversation with a human.
{chat_history}
Human: {human_input}
Chatbot:"""
prompt = PromptTemplate(
input_variables=["chat_history", "human_input"], template=template
)
memory = ConversationBufferMemory(memory_key="chat_history")
llm = OpenAI()
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
verbose=True,
memory=memory,
)
llm_chain.predict(human_input="Hi there my friend")
llm_chain.predict(human_input="Not too bad - how are you?")
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
)
from langchain_core.messages import SystemMessage
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_messages(
[
SystemMessage(
content="You are a chatbot having a conversation with a human."
), # The persistent system prompt
MessagesPlaceholder(
variable_name="chat_history"
), # Where the memory will be stored.
HumanMessagePromptTemplate.from_template(
"{human_input}"
), # Where the human input will be injected
]
)
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
llm = | ChatOpenAI() | langchain_openai.ChatOpenAI |
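# Sketch: bind the chat model, prompt, and memory into an LLMChain, mirroring
# the completion-LLM example above.
chat_llm_chain = LLMChain(
    llm=llm,
    prompt=prompt,
    verbose=True,
    memory=memory,
)
chat_llm_chain.predict(human_input="Hi there my friend")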
get_ipython().system('pip/pip3 install pyepsilla')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.vectorstores import Epsilla
from langchain_openai import OpenAIEmbeddings
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
documents = | CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) | langchain_text_splitters.CharacterTextSplitter |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.prompts import PromptTemplate
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0).configurable_fields(
temperature=ConfigurableField(
id="llm_temperature",
name="LLM Temperature",
description="The temperature of the LLM",
)
)
model.invoke("pick a random number")
model.with_config(configurable={"llm_temperature": 0.9}).invoke("pick a random number")
prompt = PromptTemplate.from_template("Pick a random number above {x}")
chain = prompt | model
chain.invoke({"x": 0})
chain.with_config(configurable={"llm_temperature": 0.9}).invoke({"x": 0})
from langchain.runnables.hub import HubRunnable
prompt = HubRunnable("rlm/rag-prompt").configurable_fields(
owner_repo_commit=ConfigurableField(
id="hub_commit",
name="Hub Commit",
description="The Hub commit to pull from",
)
)
prompt.invoke({"question": "foo", "context": "bar"})
prompt.with_config(configurable={"hub_commit": "rlm/rag-prompt-llama"}).invoke(
{"question": "foo", "context": "bar"}
)
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatAnthropic
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
llm = ChatAnthropic(temperature=0).configurable_alternatives(
ConfigurableField(id="llm"),
default_key="anthropic",
openai= | ChatOpenAI() | langchain_openai.ChatOpenAI |
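# Sketch: the call above is cut off; fully written out, the example registers
# a second OpenAI alternative and selects one at runtime.
llm = ChatAnthropic(temperature=0).configurable_alternatives(
    ConfigurableField(id="llm"),
    default_key="anthropic",
    openai=ChatOpenAI(),
    gpt4=ChatOpenAI(model="gpt-4"),
)
llm.with_config(configurable={"llm": "openai"}).invoke("Tell me a joke about bears")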
get_ipython().run_line_magic('pip', 'install --upgrade --quiet timescale-vector')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken')
import os
from dotenv import find_dotenv, load_dotenv
_ = load_dotenv(find_dotenv())
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
from typing import Tuple
from datetime import datetime, timedelta
from langchain.docstore.document import Document
from langchain_community.document_loaders import TextLoader
from langchain_community.document_loaders.json_loader import JSONLoader
from langchain_community.vectorstores.timescalevector import TimescaleVector
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../../extras/modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = | CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) | langchain_text_splitters.CharacterTextSplitter |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet hologres-vector')
from langchain_community.vectorstores import Hologres
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-pinecone langchain-openai langchain')
from langchain_community.document_loaders import TextLoader
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
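# Sketch (the index name is illustrative and assumed to already exist): embed
# the chunks through the langchain-pinecone integration and query.
from langchain_pinecone import PineconeVectorStore
docsearch = PineconeVectorStore.from_documents(
    docs, embeddings, index_name="langchain-test-index"
)
docsearch.similarity_search("What did the president say about Ketanji Brown Jackson")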
get_ipython().system('pip install gymnasium')
import tenacity
from langchain.output_parsers import RegexParser
from langchain.schema import (
HumanMessage,
SystemMessage,
)
class GymnasiumAgent:
@classmethod
def get_docs(cls, env):
return env.unwrapped.__doc__
def __init__(self, model, env):
self.model = model
self.env = env
self.docs = self.get_docs(env)
self.instructions = """
Your goal is to maximize your return, i.e. the sum of the rewards you receive.
I will give you an observation, reward, termination flag, truncation flag, and the return so far, formatted as:
Observation: <observation>
Reward: <reward>
Termination: <termination>
Truncation: <truncation>
Return: <sum_of_rewards>
You will respond with an action, formatted as:
Action: <action>
where you replace <action> with your actual action.
Do nothing else but return the action.
"""
self.action_parser = RegexParser(
regex=r"Action: (.*)", output_keys=["action"], default_output_key="action"
)
self.message_history = []
self.ret = 0
def random_action(self):
action = self.env.action_space.sample()
return action
def reset(self):
self.message_history = [
| SystemMessage(content=self.docs) | langchain.schema.SystemMessage |
get_ipython().run_line_magic('pip', "install --upgrade --quiet faiss-gpu # For CUDA 7.5+ Supported GPU's.")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss-cpu # For CPU Installation')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../../extras/modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = await FAISS.afrom_documents(docs, embeddings)
query = "What did the president say about Ketanji Brown Jackson"
docs = await db.asimilarity_search(query)
print(docs[0].page_content)
docs_and_scores = await db.asimilarity_search_with_score(query)
docs_and_scores[0]
embedding_vector = await embeddings.aembed_query(query)
docs_and_scores = await db.asimilarity_search_by_vector(embedding_vector)
db.save_local("faiss_index")
new_db = FAISS.load_local("faiss_index", embeddings, asynchronous=True)
docs = await new_db.asimilarity_search(query)
docs[0]
from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings
pkl = db.serialize_to_bytes() # serializes the faiss index
embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
db = FAISS.deserialize_from_bytes(
embeddings=embeddings, serialized=pkl, asynchronous=True
) # Load the index
db1 = await | FAISS.afrom_texts(["foo"], embeddings) | langchain_community.vectorstores.FAISS.afrom_texts |
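# Sketch: a second async-built index can be merged into the first; inspecting
# docstore._dict pokes at an internal attribute and is for demonstration only.
db2 = await FAISS.afrom_texts(["bar"], embeddings)
db1.merge_from(db2)
db1.docstore._dict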
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_community.chat_models import ChatAnthropic
from langchain_openai import ChatOpenAI
from unittest.mock import patch
import httpx
from openai import RateLimitError
request = httpx.Request("GET", "/")
response = httpx.Response(200, request=request)
error = RateLimitError("rate limit", response=response, body="")
openai_llm = ChatOpenAI(max_retries=0)
anthropic_llm = ChatAnthropic()
llm = openai_llm.with_fallbacks([anthropic_llm])
with patch("openai.resources.chat.completions.Completions.create", side_effect=error):
try:
print(openai_llm.invoke("Why did the chicken cross the road?"))
except RateLimitError:
print("Hit error")
with patch("openai.resources.chat.completions.Completions.create", side_effect=error):
try:
print(llm.invoke("Why did the chicken cross the road?"))
except RateLimitError:
print("Hit error")
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a nice assistant who always includes a compliment in your response",
),
("human", "Why did the {animal} cross the road"),
]
)
chain = prompt | llm
with patch("openai.resources.chat.completions.Completions.create", side_effect=error):
try:
print(chain.invoke({"animal": "kangaroo"}))
except RateLimitError:
print("Hit error")
from langchain_core.output_parsers import StrOutputParser
chat_prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a nice assistant who always includes a compliment in your response",
),
("human", "Why did the {animal} cross the road"),
]
)
chat_model = ChatOpenAI(model_name="gpt-fake")
bad_chain = chat_prompt | chat_model | StrOutputParser()
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
prompt_template = """Instructions: You should always include a compliment in your response.
Question: Why did the {animal} cross the road?"""
prompt = PromptTemplate.from_template(prompt_template)
llm = OpenAI()
good_chain = prompt | llm
chain = bad_chain.with_fallbacks([good_chain])
chain.invoke({"animal": "turtle"})
short_llm = ChatOpenAI()
long_llm = ChatOpenAI(model="gpt-3.5-turbo-16k")
llm = short_llm.with_fallbacks([long_llm])
inputs = "What is the next number: " + ", ".join(["one", "two"] * 3000)
try:
print(short_llm.invoke(inputs))
except Exception as e:
print(e)
try:
print(llm.invoke(inputs))
except Exception as e:
print(e)
from langchain.output_parsers import DatetimeOutputParser
prompt = ChatPromptTemplate.from_template(
"what time was {event} (in %Y-%m-%dT%H:%M:%S.%fZ format - only return this value)"
)
openai_35 = ChatOpenAI() | | DatetimeOutputParser() | langchain.output_parsers.DatetimeOutputParser |
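# Sketch of the rest of the fallback example: a GPT-4 parser chain backs up
# the 3.5 one (the event string is illustrative).
openai_4 = ChatOpenAI(model="gpt-4") | DatetimeOutputParser()
only_35 = prompt | openai_35
fallback_4 = prompt | openai_35.with_fallbacks([openai_4])
try:
    print(only_35.invoke({"event": "the superbowl in 1994"}))
except Exception as e:
    print(f"Error: {e}")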
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"Write out the following equation using algebraic symbols then solve it. Use the format\n\nEQUATION:...\nSOLUTION:...\n\n",
),
("human", "{equation_statement}"),
]
)
model = | ChatOpenAI(temperature=0) | langchain_openai.ChatOpenAI |
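# Sketch: the point of this example is .bind(), stopping generation before the
# model reveals the solution.
runnable = (
    {"equation_statement": RunnablePassthrough()}
    | prompt
    | model.bind(stop="SOLUTION")
    | StrOutputParser()
)
print(runnable.invoke("x raised to the third plus seven equals 12"))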
get_ipython().run_line_magic('pip', 'install --upgrade --quiet neo4j')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain.docstore.document import Document
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Neo4jVector
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
url = "bolt://localhost:7687"
username = "neo4j"
password = "pleaseletmein"
db = Neo4jVector.from_documents(
docs, OpenAIEmbeddings(), url=url, username=username, password=password
)
query = "What did the president say about Ketanji Brown Jackson"
docs_with_score = db.similarity_search_with_score(query, k=2)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print(doc.page_content)
print("-" * 80)
index_name = "vector" # default index name
store = Neo4jVector.from_existing_index(
OpenAIEmbeddings(),
url=url,
username=username,
password=password,
index_name=index_name,
)
store.query("CREATE (p:Person {name: 'Tomaz', location:'Slovenia', hobby:'Bicycle'})")
existing_graph = Neo4jVector.from_existing_graph(
embedding=OpenAIEmbeddings(),
url=url,
username=username,
password=password,
index_name="person_index",
node_label="Person",
text_node_properties=["name", "location"],
embedding_node_property="embedding",
)
result = existing_graph.similarity_search("Slovenia", k=1)
result[0]
store.add_documents([Document(page_content="foo")])
docs_with_score = store.similarity_search_with_score("foo")
docs_with_score[0]
hybrid_db = Neo4jVector.from_documents(
docs,
OpenAIEmbeddings(),
url=url,
username=username,
password=password,
search_type="hybrid",
)
index_name = "vector" # default index name
keyword_index_name = "keyword" # default keyword index name
store = Neo4jVector.from_existing_index(
| OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
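# Sketch: the call above is cut off; a hybrid lookup passes both index names
# and the hybrid search type.
store = Neo4jVector.from_existing_index(
    OpenAIEmbeddings(),
    url=url,
    username=username,
    password=password,
    index_name=index_name,
    keyword_index_name=keyword_index_name,
    search_type="hybrid",
)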
import asyncio
import os
import nest_asyncio
import pandas as pd
from langchain.docstore.document import Document
from langchain_community.agent_toolkits.pandas.base import create_pandas_dataframe_agent
from langchain_experimental.autonomous_agents import AutoGPT
from langchain_openai import ChatOpenAI
nest_asyncio.apply()
llm = | ChatOpenAI(model_name="gpt-4", temperature=1.0) | langchain_openai.ChatOpenAI |
from langchain.agents import AgentExecutor, BaseMultiActionAgent, Tool
from langchain_community.utilities import SerpAPIWrapper
def random_word(query: str) -> str:
print("\nNow I'm doing this!")
return "foo"
search = SerpAPIWrapper()
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
),
Tool(
name="RandomWord",
func=random_word,
description="call this to get a random word.",
),
]
from typing import Any, List, Tuple, Union
from langchain_core.agents import AgentAction, AgentFinish
class FakeAgent(BaseMultiActionAgent):
"""Fake Custom Agent."""
@property
def input_keys(self):
return ["input"]
def plan(
self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
) -> Union[List[AgentAction], AgentFinish]:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with observations
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
if len(intermediate_steps) == 0:
return [
AgentAction(tool="Search", tool_input=kwargs["input"], log=""),
AgentAction(tool="RandomWord", tool_input=kwargs["input"], log=""),
]
else:
return AgentFinish(return_values={"output": "bar"}, log="")
async def aplan(
self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any
) -> Union[List[AgentAction], AgentFinish]:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with observations
**kwargs: User inputs.
Returns:
Action specifying what tool to use.
"""
if len(intermediate_steps) == 0:
return [
AgentAction(tool="Search", tool_input=kwargs["input"], log=""),
AgentAction(tool="RandomWord", tool_input=kwargs["input"], log=""),
]
else:
return | AgentFinish(return_values={"output": "bar"}, log="") | langchain_core.agents.AgentFinish |
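# Usage sketch: run the fake multi-action agent end to end (the question is
# illustrative).
agent = FakeAgent()
agent_executor = AgentExecutor.from_agent_and_tools(
    agent=agent, tools=tools, verbose=True
)
agent_executor.run("How many people live in canada as of 2023?")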
get_ipython().run_line_magic('pip', 'install --upgrade --quiet "cassio>=0.1.4"')
import os
from getpass import getpass
from datasets import (
load_dataset,
)
from langchain_community.document_loaders import PyPDFLoader
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
os.environ["OPENAI_API_KEY"] = getpass("OPENAI_API_KEY = ")
embe = OpenAIEmbeddings()
from langchain_community.vectorstores import Cassandra
from cassandra.cluster import Cluster
cluster = Cluster(["127.0.0.1"])
session = cluster.connect()
import cassio
CASSANDRA_KEYSPACE = input("CASSANDRA_KEYSPACE = ")
cassio.init(session=session, keyspace=CASSANDRA_KEYSPACE)
vstore = Cassandra(
embedding=embe,
table_name="cassandra_vector_demo",
)
ASTRA_DB_ID = input("ASTRA_DB_ID = ")
ASTRA_DB_APPLICATION_TOKEN = getpass("ASTRA_DB_APPLICATION_TOKEN = ")
desired_keyspace = input("ASTRA_DB_KEYSPACE (optional, can be left empty) = ")
if desired_keyspace:
ASTRA_DB_KEYSPACE = desired_keyspace
else:
ASTRA_DB_KEYSPACE = None
import cassio
cassio.init(
database_id=ASTRA_DB_ID,
token=ASTRA_DB_APPLICATION_TOKEN,
keyspace=ASTRA_DB_KEYSPACE,
)
vstore = Cassandra(
embedding=embe,
table_name="cassandra_vector_demo",
)
philo_dataset = load_dataset("datastax/philosopher-quotes")["train"]
docs = []
for entry in philo_dataset:
metadata = {"author": entry["author"]}
doc = Document(page_content=entry["quote"], metadata=metadata)
docs.append(doc)
inserted_ids = vstore.add_documents(docs)
print(f"\nInserted {len(inserted_ids)} documents.")
texts = ["I think, therefore I am.", "To the things themselves!"]
metadatas = [{"author": "descartes"}, {"author": "husserl"}]
ids = ["desc_01", "huss_xy"]
inserted_ids_2 = vstore.add_texts(texts=texts, metadatas=metadatas, ids=ids)
print(f"\nInserted {len(inserted_ids_2)} documents.")
results = vstore.similarity_search("Our life is what we make of it", k=3)
for res in results:
print(f"* {res.page_content} [{res.metadata}]")
results_filtered = vstore.similarity_search(
"Our life is what we make of it",
k=3,
filter={"author": "plato"},
)
for res in results_filtered:
print(f"* {res.page_content} [{res.metadata}]")
results = vstore.similarity_search_with_score("Our life is what we make of it", k=3)
for res, score in results:
print(f"* [SIM={score:3f}] {res.page_content} [{res.metadata}]")
results = vstore.max_marginal_relevance_search(
"Our life is what we make of it",
k=3,
filter={"author": "aristotle"},
)
for res in results:
print(f"* {res.page_content} [{res.metadata}]")
delete_1 = vstore.delete(inserted_ids[:3])
print(f"all_succeed={delete_1}") # True, all documents deleted
delete_2 = vstore.delete(inserted_ids[2:5])
print(f"some_succeeds={delete_2}") # True, though some IDs were gone already
get_ipython().system('curl -L "https://github.com/awesome-astra/datasets/blob/main/demo-resources/what-is-philosophy/what-is-philosophy.pdf?raw=true" -o "what-is-philosophy.pdf"')
pdf_loader = PyPDFLoader("what-is-philosophy.pdf")
splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=64)
docs_from_pdf = pdf_loader.load_and_split(text_splitter=splitter)
print(f"Documents from PDF: {len(docs_from_pdf)}.")
inserted_ids_from_pdf = vstore.add_documents(docs_from_pdf)
print(f"Inserted {len(inserted_ids_from_pdf)} documents.")
retriever = vstore.as_retriever(search_kwargs={"k": 3})
philo_template = """
You are a philosopher that draws inspiration from great thinkers of the past
to craft well-thought-out answers to user questions. Use the provided context as the basis
for your answers and do not make up new reasoning paths - just mix-and-match what you are given.
Your answers must be concise and to the point, and refrain from answering about other topics than philosophy.
CONTEXT:
{context}
QUESTION: {question}
YOUR ANSWER:"""
philo_prompt = ChatPromptTemplate.from_template(philo_template)
llm = ChatOpenAI()
chain = (
{"context": retriever, "question": RunnablePassthrough()}
| philo_prompt
| llm
| | StrOutputParser() | langchain_core.output_parsers.StrOutputParser |
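# Usage sketch, once the trailing parenthesis of the chain is closed (the
# question is illustrative):
chain.invoke("How does Russel elaborate on Peirce's idea of the security blanket?")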
get_ipython().run_line_magic('pip', 'install --upgrade --quiet jsonformer > /dev/null')
import logging
logging.basicConfig(level=logging.ERROR)
import json
import os
import requests
from langchain.tools import tool
HF_TOKEN = os.environ.get("HUGGINGFACE_API_KEY")
@tool
def ask_star_coder(query: str, temperature: float = 1.0, max_new_tokens: float = 250):
"""Query the BigCode StarCoder model about coding questions."""
url = "https://api-inference.huggingface.co/models/bigcode/starcoder"
headers = {
"Authorization": f"Bearer {HF_TOKEN}",
"content-type": "application/json",
}
payload = {
"inputs": f"{query}\n\nAnswer:",
"temperature": temperature,
"max_new_tokens": int(max_new_tokens),
}
response = requests.post(url, headers=headers, data=json.dumps(payload))
response.raise_for_status()
return json.loads(response.content.decode("utf-8"))
prompt = """You must respond using JSON format, with a single action and single action input.
You may 'ask_star_coder' for help on coding problems.
{arg_schema}
EXAMPLES
----
Human: "So what's all this about a GIL?"
AI Assistant:{{
"action": "ask_star_coder",
"action_input": {{"query": "What is a GIL?", "temperature": 0.0, "max_new_tokens": 100}}"
}}
Observation: "The GIL is python's Global Interpreter Lock"
Human: "Could you please write a calculator program in LISP?"
AI Assistant:{{
"action": "ask_star_coder",
"action_input": {{"query": "Write a calculator program in LISP", "temperature": 0.0, "max_new_tokens": 250}}
}}
Observation: "(defun add (x y) (+ x y))\n(defun sub (x y) (- x y ))"
Human: "What's the difference between an SVM and an LLM?"
AI Assistant:{{
"action": "ask_star_coder",
"action_input": {{"query": "What's the difference between SGD and an SVM?", "temperature": 1.0, "max_new_tokens": 250}}
}}
Observation: "SGD stands for stochastic gradient descent, while an SVM is a Support Vector Machine."
BEGIN! Answer the Human's question as best as you are able.
------
Human: 'What's the difference between an iterator and an iterable?'
AI Assistant:""".format(arg_schema=ask_star_coder.args)
from langchain_community.llms import HuggingFacePipeline
from transformers import pipeline
hf_model = pipeline(
"text-generation", model="cerebras/Cerebras-GPT-590M", max_new_tokens=200
)
original_model = HuggingFacePipeline(pipeline=hf_model)
generated = original_model.predict(prompt, stop=["Observation:", "Human:"])
print(generated)
decoder_schema = {
"title": "Decoding Schema",
"type": "object",
"properties": {
"action": {"type": "string", "default": ask_star_coder.name},
"action_input": {
"type": "object",
"properties": ask_star_coder.args,
},
},
}
from langchain_experimental.llms import JsonFormer
json_former = JsonFormer(json_schema=decoder_schema, pipeline=hf_model)
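# Usage sketch: with JsonFormer the generation is constrained to the schema,
# so the decoded action is guaranteed to be valid JSON.
results = json_former.predict(prompt, stop=["Observation:", "Human:"])
print(results)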
get_ipython().run_line_magic('pip', 'install --upgrade --quiet aphrodite-engine==0.4.2')
from langchain_community.llms import Aphrodite
llm = Aphrodite(
model="PygmalionAI/pygmalion-2-7b",
trust_remote_code=True, # mandatory for hf models
max_tokens=128,
temperature=1.2,
min_p=0.05,
mirostat_mode=0, # change to 2 to use mirostat
mirostat_tau=5.0,
mirostat_eta=0.1,
)
print(
llm(
'<|system|>Enter RP mode. You are Ayumu "Osaka" Kasuga.<|user|>Hey Osaka. Tell me about yourself.<|model|>'
)
)
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm_chain = LLMChain(prompt=prompt, llm=llm)
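# Usage sketch (question text is an assumed example):
question = "Who was the US president in the year the first Pokemon game was released?"
print(llm_chain.run(question))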
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
import os
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import GooseAI
from getpass import getpass
GOOSEAI_API_KEY = getpass()
os.environ["GOOSEAI_API_KEY"] = GOOSEAI_API_KEY
llm = GooseAI()
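# Minimal sketch (question text is an assumed example): wire the GooseAI LLM into a simple chain.
template = """Question: {question}

Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
llm_chain.run(question)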
get_ipython().system('pip install pettingzoo pygame rlcard')
import collections
import inspect
import tenacity
from langchain.output_parsers import RegexParser
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
class GymnasiumAgent:
@classmethod
def get_docs(cls, env):
return env.unwrapped.__doc__
def __init__(self, model, env):
self.model = model
self.env = env
self.docs = self.get_docs(env)
self.instructions = """
Your goal is to maximize your return, i.e. the sum of the rewards you receive.
I will give you an observation, reward, termination flag, truncation flag, and the return so far, formatted as:
Observation: <observation>
Reward: <reward>
Termination: <termination>
Truncation: <truncation>
Return: <sum_of_rewards>
You will respond with an action, formatted as:
Action: <action>
where you replace <action> with your actual action.
Do nothing else but return the action.
"""
self.action_parser = RegexParser(
regex=r"Action: (.*)", output_keys=["action"], default_output_key="action"
)
self.message_history = []
self.ret = 0
def random_action(self):
action = self.env.action_space.sample()
return action
def reset(self):
self.message_history = [
SystemMessage(content=self.docs),
SystemMessage(content=self.instructions),
]
def observe(self, obs, rew=0, term=False, trunc=False, info=None):
self.ret += rew
obs_message = f"""
Observation: {obs}
Reward: {rew}
Termination: {term}
Truncation: {trunc}
Return: {self.ret}
"""
self.message_history.append(HumanMessage(content=obs_message))
return obs_message
def _act(self):
act_message = self.model(self.message_history)
self.message_history.append(act_message)
action = int(self.action_parser.parse(act_message.content)["action"])
return action
def act(self):
try:
for attempt in tenacity.Retrying(
stop=tenacity.stop_after_attempt(2),
wait=tenacity.wait_none(), # No waiting time between retries
retry=tenacity.retry_if_exception_type(ValueError),
before_sleep=lambda retry_state: print(
f"ValueError occurred: {retry_state.outcome.exception()}, retrying..."
),
):
with attempt:
action = self._act()
except tenacity.RetryError:
action = self.random_action()
return action
def main(agents, env):
env.reset()
for name, agent in agents.items():
agent.reset()
for agent_name in env.agent_iter():
observation, reward, termination, truncation, info = env.last()
obs_message = agents[agent_name].observe(
observation, reward, termination, truncation, info
)
print(obs_message)
if termination or truncation:
action = None
else:
action = agents[agent_name].act()
print(f"Action: {action}")
env.step(action)
env.close()
class PettingZooAgent(GymnasiumAgent):
@classmethod
def get_docs(cls, env):
return inspect.getmodule(env.unwrapped).__doc__
def __init__(self, name, model, env):
super().__init__(model, env)
self.name = name
def random_action(self):
action = self.env.action_space(self.name).sample()
return action
from pettingzoo.classic import rps_v2
env = rps_v2.env(max_cycles=3, render_mode="human")
agents = {
name: PettingZooAgent(name=name, model=ChatOpenAI(temperature=1), env=env)
for name in env.possible_agents
}
main(agents, env)
class ActionMaskAgent(PettingZooAgent):
def __init__(self, name, model, env):
super().__init__(name, model, env)
self.obs_buffer = collections.deque(maxlen=1)
def random_action(self):
obs = self.obs_buffer[-1]
action = self.env.action_space(self.name).sample(obs["action_mask"])
return action
def reset(self):
self.message_history = [
SystemMessage(content=self.docs),
SystemMessage(content=self.instructions),
]
def observe(self, obs, rew=0, term=False, trunc=False, info=None):
self.obs_buffer.append(obs)
return super().observe(obs, rew, term, trunc, info)
def _act(self):
valid_action_instruction = "Generate a valid action given by the indices of the `action_mask` that are not 0, according to the action formatting rules."
self.message_history.append(HumanMessage(content=valid_action_instruction))
return super()._act()
from pettingzoo.classic import tictactoe_v3
env = tictactoe_v3.env(render_mode="human")
agents = {
name: ActionMaskAgent(name=name, model=ChatOpenAI(temperature=0.2), env=env)
for name in env.possible_agents
}
main(agents, env)
from pettingzoo.classic import texas_holdem_no_limit_v6
env = texas_holdem_no_limit_v6.env(num_players=4, render_mode="human")
agents = {
    name: ActionMaskAgent(name=name, model=ChatOpenAI(temperature=0.2), env=env)
    for name in env.possible_agents
}
main(agents, env)
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-openai langchain-anthropic langchain-community wikipedia')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()
from langchain_community.retrievers import WikipediaRetriever
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
wiki = WikipediaRetriever(top_k_results=6, doc_content_chars_max=2000)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, answer the user question. If none of the articles answer the question, just say you don't know.\n\nHere are the Wikipedia articles:{context}",
),
("human", "{question}"),
]
)
prompt.pretty_print()
from operator import itemgetter
from typing import List
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import (
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
def format_docs(docs: List[Document]) -> str:
"""Convert Documents to a single string.:"""
formatted = [
f"Article Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for doc in docs
]
return "\n\n" + "\n\n".join(formatted)
format = itemgetter("docs") | RunnableLambda(format_docs)
answer = prompt | llm | StrOutputParser()
chain = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format)
.assign(answer=answer)
.pick(["answer", "docs"])
)
chain.invoke("How fast are cheetahs?")
from langchain_core.pydantic_v1 import BaseModel, Field
class cited_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[int] = Field(
...,
description="The integer IDs of the SPECIFIC sources which justify the answer.",
)
llm_with_tool = llm.bind_tools(
[cited_answer],
tool_choice="cited_answer",
)
example_q = """What Brian's height?
Source: 1
Information: Suzy is 6'2"
Source: 2
Information: Jeremiah is blonde
Source: 3
Information: Brian is 3 inches shorter than Suzy"""
llm_with_tool.invoke(example_q)
from langchain.output_parsers.openai_tools import JsonOutputKeyToolsParser
output_parser = JsonOutputKeyToolsParser(key_name="cited_answer", return_single=True)
(llm_with_tool | output_parser).invoke(example_q)
def format_docs_with_id(docs: List[Document]) -> str:
formatted = [
f"Source ID: {i}\nArticle Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for i, doc in enumerate(docs)
]
return "\n\n" + "\n\n".join(formatted)
format_1 = itemgetter("docs") | RunnableLambda(format_docs_with_id)
answer_1 = prompt | llm_with_tool | output_parser
chain_1 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_1)
.assign(cited_answer=answer_1)
.pick(["cited_answer", "docs"])
)
chain_1.invoke("How fast are cheetahs?")
class Citation(BaseModel):
source_id: int = Field(
...,
description="The integer ID of a SPECIFIC source which justifies the answer.",
)
quote: str = Field(
...,
description="The VERBATIM quote from the specified source that justifies the answer.",
)
class quoted_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[Citation] = Field(
..., description="Citations from the given sources that justify the answer."
)
output_parser_2 = JsonOutputKeyToolsParser(key_name="quoted_answer", return_single=True)
llm_with_tool_2 = llm.bind_tools(
[quoted_answer],
tool_choice="quoted_answer",
)
format_2 = itemgetter("docs") | RunnableLambda(format_docs_with_id)
answer_2 = prompt | llm_with_tool_2 | output_parser_2
chain_2 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_2)
.assign(quoted_answer=answer_2)
.pick(["quoted_answer", "docs"])
)
chain_2.invoke("How fast are cheetahs?")
from langchain_anthropic import ChatAnthropicMessages
anthropic = ChatAnthropicMessages(model_name="claude-instant-1.2")
system = """You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, \
answer the user question and provide citations. If none of the articles answer the question, just say you don't know.
Remember, you must return both an answer and citations. A citation consists of a VERBATIM quote that \
justifies the answer and the ID of the quote article. Return a citation for every quote across all articles \
that justify the answer. Use the following format for your final output:
<cited_answer>
<answer></answer>
<citations>
<citation><source_id></source_id><quote></quote></citation>
<citation><source_id></source_id><quote></quote></citation>
...
</citations>
</cited_answer>
Here are the Wikipedia articles:{context}"""
prompt_3 = ChatPromptTemplate.from_messages(
[("system", system), ("human", "{question}")]
)
from langchain_core.output_parsers import XMLOutputParser
def format_docs_xml(docs: List[Document]) -> str:
formatted = []
for i, doc in enumerate(docs):
doc_str = f"""\
<source id=\"{i}\">
<title>{doc.metadata['title']}</title>
<article_snippet>{doc.page_content}</article_snippet>
</source>"""
formatted.append(doc_str)
return "\n\n<sources>" + "\n".join(formatted) + "</sources>"
format_3 = itemgetter("docs") | RunnableLambda(format_docs_xml)
answer_3 = prompt_3 | anthropic | XMLOutputParser() | itemgetter("cited_answer")
chain_3 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_3)
.assign(cited_answer=answer_3)
.pick(["cited_answer", "docs"])
)
chain_3.invoke("How fast are cheetahs?")
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
splitter = RecursiveCharacterTextSplitter(
chunk_size=400,
chunk_overlap=0,
separators=["\n\n", "\n", ".", " "],
keep_separator=False,
)
compressor = EmbeddingsFilter(embeddings=OpenAIEmbeddings(), k=10)
def split_and_filter(input) -> List[Document]:
docs = input["docs"]
question = input["question"]
split_docs = splitter.split_documents(docs)
stateful_docs = compressor.compress_documents(split_docs, question)
return [stateful_doc for stateful_doc in stateful_docs]
retrieve = (
    RunnableParallel(question=RunnablePassthrough(), docs=wiki) | split_and_filter
)
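# Sketch: run the retrieve step on its own to inspect the compressed snippets.
docs = retrieve.invoke("How fast are cheetahs?")
for doc in docs:
    print(doc.page_content)
    print("\n\n")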
from langchain.prompts import PromptTemplate
prompt = (
PromptTemplate.from_template("Tell me a joke about {topic}")
+ ", make it funny"
+ "\n\nand in {language}"
)
prompt
prompt.format(topic="sports", language="spanish")
from langchain.chains import LLMChain
from langchain_openai import ChatOpenAI
model = ChatOpenAI()
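# Sketch: run the composed prompt through a chain with the example inputs from above.
chain = LLMChain(llm=model, prompt=prompt)
chain.run(topic="sports", language="spanish")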
get_ipython().system('poetry run pip install dgml-utils==0.3.0 --upgrade --quiet')
import os
from langchain_community.document_loaders import DocugamiLoader
DOCUGAMI_API_KEY = os.environ.get("DOCUGAMI_API_KEY")
docset_id = "26xpy3aes7xp"
document_ids = ["d7jqdzcj50sj", "cgd1eacfkchw"]
loader = DocugamiLoader(docset_id=docset_id, document_ids=document_ids)
chunks = loader.load()
len(chunks)
loader.min_text_length = 64
loader.include_xml_tags = True
chunks = loader.load()
for chunk in chunks[:5]:
print(chunk)
get_ipython().system('poetry run pip install --upgrade langchain-openai tiktoken chromadb hnswlib')
loader = DocugamiLoader(docset_id="zo954yqy53wp")
chunks = loader.load()
for chunk in chunks:
stripped_metadata = chunk.metadata.copy()
for key in chunk.metadata:
if key not in ["name", "xpath", "id", "structure"]:
del stripped_metadata[key]
chunk.metadata = stripped_metadata
print(len(chunks))
from langchain.chains import RetrievalQA
from langchain_community.vectorstores.chroma import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
embedding = OpenAIEmbeddings()
vectordb = Chroma.from_documents(documents=chunks, embedding=embedding)
retriever = vectordb.as_retriever()
qa_chain = RetrievalQA.from_chain_type(
llm=OpenAI(), chain_type="stuff", retriever=retriever, return_source_documents=True
)
qa_chain("What can tenants do with signage on their properties?")
chain_response = qa_chain("What is rentable area for the property owned by DHA Group?")
chain_response["result"] # correct answer should be 13,500 sq ft
chain_response["source_documents"]
loader = DocugamiLoader(docset_id="zo954yqy53wp")
loader.include_xml_tags = (
True # for additional semantics from the Docugami knowledge graph
)
chunks = loader.load()
print(chunks[0].metadata)
get_ipython().system('poetry run pip install --upgrade lark --quiet')
from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_community.vectorstores.chroma import Chroma
EXCLUDE_KEYS = ["id", "xpath", "structure"]
metadata_field_info = [
AttributeInfo(
name=key,
description=f"The {key} for this chunk",
type="string",
)
for key in chunks[0].metadata
if key.lower() not in EXCLUDE_KEYS
]
document_content_description = "Contents of this chunk"
llm = OpenAI(temperature=0)
vectordb = Chroma.from_documents(documents=chunks, embedding=embedding)
retriever = SelfQueryRetriever.from_llm(
llm, vectordb, document_content_description, metadata_field_info, verbose=True
)
qa_chain = RetrievalQA.from_chain_type(
    llm=OpenAI(), chain_type="stuff", retriever=retriever, return_source_documents=True
)
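# Usage sketch: the self-querying retriever can now filter on Docugami metadata.
qa_chain("What is rentable area for the property owned by DHA Group?")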
import xorbits.pandas as pd
from langchain_experimental.agents.agent_toolkits import create_xorbits_agent
from langchain_openai import OpenAI
data = pd.read_csv("titanic.csv")
agent = create_xorbits_agent(OpenAI(temperature=0), data, verbose=True)
agent.run("How many rows and columns are there?")
agent.run("How many people are in pclass 1?")
agent.run("whats the mean age?")
agent.run("Group the data by sex and find the average age for each group")
agent.run(
"Show the number of people whose age is greater than 30 and fare is between 30 and 50 , and pclass is either 1 or 2"
)
import xorbits.numpy as np
from langchain.agents import create_xorbits_agent
from langchain_openai import OpenAI
arr = np.array([1, 2, 3, 4, 5, 6])
agent = create_xorbits_agent(OpenAI(temperature=0), arr, verbose=True)
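# Usage sketch (queries are assumed examples):
agent.run("Give the shape of the array")
agent.run("Give the 2nd element of the array")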
import os
import yaml
get_ipython().system('wget https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml -O openai_openapi.yaml')
get_ipython().system('wget https://www.klarna.com/us/shopping/public/openai/v0/api-docs -O klarna_openapi.yaml')
get_ipython().system('wget https://raw.githubusercontent.com/APIs-guru/openapi-directory/main/APIs/spotify.com/1.0.0/openapi.yaml -O spotify_openapi.yaml')
from langchain_community.agent_toolkits.openapi.spec import reduce_openapi_spec
with open("openai_openapi.yaml") as f:
raw_openai_api_spec = yaml.load(f, Loader=yaml.Loader)
openai_api_spec = reduce_openapi_spec(raw_openai_api_spec)
with open("klarna_openapi.yaml") as f:
raw_klarna_api_spec = yaml.load(f, Loader=yaml.Loader)
klarna_api_spec = reduce_openapi_spec(raw_klarna_api_spec)
with open("spotify_openapi.yaml") as f:
raw_spotify_api_spec = yaml.load(f, Loader=yaml.Loader)
spotify_api_spec = reduce_openapi_spec(raw_spotify_api_spec)
import spotipy.util as util
from langchain.requests import RequestsWrapper
def construct_spotify_auth_headers(raw_spec: dict):
scopes = list(
raw_spec["components"]["securitySchemes"]["oauth_2_0"]["flows"][
"authorizationCode"
]["scopes"].keys()
)
access_token = util.prompt_for_user_token(scope=",".join(scopes))
return {"Authorization": f"Bearer {access_token}"}
headers = construct_spotify_auth_headers(raw_spotify_api_spec)
requests_wrapper = RequestsWrapper(headers=headers)
endpoints = [
(route, operation)
for route, operations in raw_spotify_api_spec["paths"].items()
for operation in operations
if operation in ["get", "post"]
]
len(endpoints)
import tiktoken
enc = tiktoken.encoding_for_model("gpt-4")
def count_tokens(s):
return len(enc.encode(s))
count_tokens(yaml.dump(raw_spotify_api_spec))
from langchain_community.agent_toolkits.openapi import planner
from langchain_openai import OpenAI
llm = OpenAI(model_name="gpt-4", temperature=0.0)
spotify_agent = planner.create_openapi_agent(spotify_api_spec, requests_wrapper, llm)
user_query = (
"make me a playlist with the first song from kind of blue. call it machine blues."
)
spotify_agent.run(user_query)
user_query = "give me a song I'd like, make it blues-ey"
spotify_agent.run(user_query)
headers = {"Authorization": f"Bearer {os.getenv('OPENAI_API_KEY')}"}
openai_requests_wrapper = RequestsWrapper(headers=headers)
llm = OpenAI(model_name="gpt-4", temperature=0.25)
openai_agent = planner.create_openapi_agent(
openai_api_spec, openai_requests_wrapper, llm
)
user_query = "generate a short piece of advice"
openai_agent.run(user_query)
from langchain.agents import create_openapi_agent
from langchain_community.agent_toolkits import OpenAPIToolkit
from langchain_community.tools.json.tool import JsonSpec
from langchain_openai import OpenAI
with open("openai_openapi.yaml") as f:
data = yaml.load(f, Loader=yaml.FullLoader)
json_spec = JsonSpec(dict_=data, max_value_length=4000)
openapi_toolkit = OpenAPIToolkit.from_llm(
OpenAI(temperature=0), json_spec, openai_requests_wrapper, verbose=True
)
openapi_agent_executor = create_openapi_agent(
    llm=OpenAI(temperature=0), toolkit=openapi_toolkit, verbose=True
)
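# Usage sketch (query is an assumed example):
openapi_agent_executor.run(
    "Make a post request to openai /completions. The prompt should be 'tell me a joke.'"
)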
get_ipython().run_line_magic('pip', 'install --upgrade --quiet elasticsearch')
from langchain.retrievers import ElasticSearchBM25Retriever
elasticsearch_url = "http://localhost:9200"
retriever = ElasticSearchBM25Retriever.create(elasticsearch_url, "langchain-index-4")
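# Usage sketch (texts are assumed examples): index a few strings, then query by BM25.
retriever.add_texts(["foo", "bar", "world", "hello", "foo bar"])
result = retriever.get_relevant_documents("foo")
result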
get_ipython().run_line_magic('pip', 'install --upgrade --quiet johnsnowlabs')
from langchain_community.embeddings.johnsnowlabs import JohnSnowLabsEmbeddings
embedder = JohnSnowLabsEmbeddings("en.embed_sentence.biobert.clinical_base_cased")
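# Usage sketch (sentences are assumed examples):
embedding = embedder.embed_query("This is a test sentence")
doc_embeddings = embedder.embed_documents(["This is a test sentence", "This is another one"])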
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sentence_transformers')
from langchain_community.embeddings import HuggingFaceEmbeddings
embeddings = HuggingFaceEmbeddings()
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=400, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
print(len(docs))
import getpass
import os
os.environ["SEMADB_API_KEY"] = getpass.getpass("SemaDB API Key:")
from langchain_community.vectorstores import SemaDB
from langchain_community.vectorstores.utils import DistanceStrategy
db = SemaDB("mycollection", 768, embeddings, DistanceStrategy.COSINE)
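# Sketch: index the split documents, then run a similarity search.
db.add_documents(docs)
query = "What did the president say about Ketanji Brown Jackson"
matches = db.similarity_search(query)
print(matches[0].page_content)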
get_ipython().run_line_magic('pip', 'install --upgrade --quiet clarifai')
from getpass import getpass
CLARIFAI_PAT = getpass()
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Clarifai
from langchain_text_splitters import CharacterTextSplitter
USER_ID = "USERNAME_ID"
APP_ID = "APPLICATION_ID"
NUMBER_OF_DOCS = 2
texts = [
"I really enjoy spending time with you",
"I hate spending time with my dog",
"I want to go for a run",
"I went to the movies yesterday",
"I love playing soccer with my friends",
]
metadatas = [
{"id": i, "text": text, "source": "book 1", "category": ["books", "modern"]}
for i, text in enumerate(texts)
]
idlist = ["text1", "text2", "text3", "text4", "text5"]
metadatas = [
{"id": idlist[i], "text": text, "source": "book 1", "category": ["books", "modern"]}
for i, text in enumerate(texts)
]
clarifai_vector_db = Clarifai(
user_id=USER_ID,
app_id=APP_ID,
number_of_docs=NUMBER_OF_DOCS,
)
response = clarifai_vector_db.add_texts(texts=texts, ids=idlist, metadatas=metadatas)
response = clarifai_vector_db.add_texts(texts=texts)
clarifai_vector_db = Clarifai.from_texts(
user_id=USER_ID,
app_id=APP_ID,
texts=texts,
metadatas=metadatas,
)
docs = clarifai_vector_db.similarity_search("I would like to see you")
docs
book1_similar_docs = clarifai_vector_db.similarity_search(
"I would love to see you", filter={"source": "book 1"}
)
book_category_similar_docs = clarifai_vector_db.similarity_search(
"I would love to see you", filter={"category": ["books"]}
)
loader = TextLoader("your_local_file_path.txt")
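# Sketch (file path above is a placeholder): split the loaded file and index it in Clarifai,
# mirroring the from_texts call earlier in this section.
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
docs = text_splitter.split_documents(documents)
clarifai_vector_db = Clarifai.from_documents(
    user_id=USER_ID,
    app_id=APP_ID,
    documents=docs,
    number_of_docs=NUMBER_OF_DOCS,
)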
import os
from langchain_community.utilities import OpenWeatherMapAPIWrapper
os.environ["OPENWEATHERMAP_API_KEY"] = ""
weather = OpenWeatherMapAPIWrapper()
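# Usage sketch (location is an assumed example):
weather_data = weather.run("London,GB")
print(weather_data)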
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0, model="gpt-4-turbo-preview")
from langchain import hub
from langchain_core.prompts import PromptTemplate
select_prompt = hub.pull("hwchase17/self-discovery-select")
select_prompt.pretty_print()
adapt_prompt = hub.pull("hwchase17/self-discovery-adapt")
adapt_prompt.pretty_print()
structured_prompt = hub.pull("hwchase17/self-discovery-structure")
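structured_prompt.pretty_print()
# Sketch: the matching reasoning prompt follows the same hub naming pattern (assumed).
reasoning_prompt = hub.pull("hwchase17/self-discovery-reasoning")
reasoning_prompt.pretty_print()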
get_ipython().system('pip install --upgrade langchain langchain-google-vertexai')
project: str = "PUT_YOUR_PROJECT_ID_HERE" # @param {type:"string"}
endpoint_id: str = "PUT_YOUR_ENDPOINT_ID_HERE" # @param {type:"string"}
location: str = "PUT_YOUR_ENDPOINT_LOCAtION_HERE" # @param {type:"string"}
from langchain_google_vertexai import (
GemmaChatVertexAIModelGarden,
GemmaVertexAIModelGarden,
)
llm = GemmaVertexAIModelGarden(
endpoint_id=endpoint_id,
project=project,
location=location,
)
output = llm.invoke("What is the meaning of life?")
print(output)
from langchain_core.messages import HumanMessage
llm = GemmaChatVertexAIModelGarden(
endpoint_id=endpoint_id,
project=project,
location=location,
)
message1 = HumanMessage(content="How much is 2+2?")
answer1 = llm.invoke([message1])
print(answer1)
message2 = HumanMessage(content="How much is 3+3?")
answer2 = llm.invoke([message1, answer1, message2])
print(answer2)
answer1 = llm.invoke([message1], parse_response=True)
print(answer1)
answer2 = llm.invoke([message1, answer1, message2], parse_response=True)
print(answer2)
get_ipython().system('mkdir -p ~/.kaggle && cp kaggle.json ~/.kaggle/kaggle.json')
get_ipython().system('pip install keras>=3 keras_nlp')
from langchain_google_vertexai import GemmaLocalKaggle
keras_backend: str = "jax" # @param {type:"string"}
model_name: str = "gemma_2b_en" # @param {type:"string"}
llm = GemmaLocalKaggle(model_name=model_name, keras_backend=keras_backend)
output = llm.invoke("What is the meaning of life?", max_tokens=30)
print(output)
from langchain_google_vertexai import GemmaChatLocalKaggle
keras_backend: str = "jax" # @param {type:"string"}
model_name: str = "gemma_2b_en" # @param {type:"string"}
llm = GemmaChatLocalKaggle(model_name=model_name, keras_backend=keras_backend)
from langchain_core.messages import HumanMessage
message1 = HumanMessage(content="Hi! Who are you?")
answer1 = llm.invoke([message1], max_tokens=30)
print(answer1)
message2 = HumanMessage(content="What can you help me with?")
answer2 = llm.invoke([message1, answer1, message2], max_tokens=60)
print(answer2)
answer1 = llm.invoke([message1], max_tokens=30, parse_response=True)
print(answer1)
answer2 = llm.invoke([message1, answer1, message2], max_tokens=60, parse_response=True)
print(answer2)
from langchain_google_vertexai import GemmaChatLocalHF, GemmaLocalHF
hf_access_token: str = "PUT_YOUR_TOKEN_HERE" # @param {type:"string"}
model_name: str = "google/gemma-2b" # @param {type:"string"}
llm = GemmaLocalHF(model_name="google/gemma-2b", hf_access_token=hf_access_token)
output = llm.invoke("What is the meaning of life?", max_tokens=50)
print(output)
llm = GemmaChatLocalHF(model_name=model_name, hf_access_token=hf_access_token)
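# Usage sketch, mirroring the Kaggle chat example above:
message1 = HumanMessage(content="Hi! Who are you?")
answer1 = llm.invoke([message1], max_tokens=60)
print(answer1)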
get_ipython().run_line_magic('pip', 'install --upgrade --quiet manifest-ml')
from langchain_community.llms.manifest import ManifestWrapper
from manifest import Manifest
manifest = Manifest(
client_name="huggingface", client_connection="http://127.0.0.1:5000"
)
print(manifest.client_pool.get_current_client().get_model_params())
llm = ManifestWrapper(
client=manifest, llm_kwargs={"temperature": 0.001, "max_tokens": 256}
)
from langchain.chains.mapreduce import MapReduceChain
from langchain.prompts import PromptTemplate
from langchain_text_splitters import CharacterTextSplitter
_prompt = """Write a concise summary of the following:
{text}
CONCISE SUMMARY:"""
prompt = PromptTemplate.from_template(_prompt)
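# Sketch (input file path is an assumed example): run a map-reduce summarization
# through the Manifest-backed LLM.
text_splitter = CharacterTextSplitter()
mp_chain = MapReduceChain.from_params(llm, prompt, text_splitter)
with open("../../modules/state_of_the_union.txt") as f:
    state_of_the_union = f.read()
mp_chain.run(state_of_the_union)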
get_ipython().run_cell_magic('writefile', 'discord_chats.txt', "talkingtower β 08/15/2023 11:10 AM\nLove music! Do you like jazz?\nreporterbob β 08/15/2023 9:27 PM\nYes! Jazz is fantastic. Ever heard this one?\nWebsite\nListen to classic jazz track...\n\ntalkingtower β Yesterday at 5:03 AM\nIndeed! Great choice. π·\nreporterbob β Yesterday at 5:23 AM\nThanks! How about some virtual sightseeing?\nWebsite\nVirtual tour of famous landmarks...\n\ntalkingtower β Today at 2:38 PM\nSounds fun! Let's explore.\nreporterbob β Today at 2:56 PM\nEnjoy the tour! See you around.\ntalkingtower β Today at 3:00 PM\nThank you! Goodbye! π\nreporterbob β Today at 3:02 PM\nFarewell! Happy exploring.\n")
import logging
import re
from typing import Iterator, List
from langchain_community.chat_loaders import base as chat_loaders
from langchain_core.messages import BaseMessage, HumanMessage
logger = logging.getLogger()
class DiscordChatLoader(chat_loaders.BaseChatLoader):
def __init__(self, path: str):
"""
Initialize the Discord chat loader.
Args:
path: Path to the exported Discord chat text file.
"""
self.path = path
self._message_line_regex = re.compile(
r"(.+?) β (\w{3,9} \d{1,2}(?:st|nd|rd|th)?(?:, \d{4})? \d{1,2}:\d{2} (?:AM|PM)|Today at \d{1,2}:\d{2} (?:AM|PM)|Yesterday at \d{1,2}:\d{2} (?:AM|PM))", # noqa
flags=re.DOTALL,
)
def _load_single_chat_session_from_txt(
self, file_path: str
) -> chat_loaders.ChatSession:
"""
Load a single chat session from a text file.
Args:
file_path: Path to the text file containing the chat messages.
Returns:
A `ChatSession` object containing the loaded chat messages.
"""
with open(file_path, "r", encoding="utf-8") as file:
lines = file.readlines()
results: List[BaseMessage] = []
current_sender = None
current_timestamp = None
current_content = []
for line in lines:
if re.match(
r".+? β (\d{2}/\d{2}/\d{4} \d{1,2}:\d{2} (?:AM|PM)|Today at \d{1,2}:\d{2} (?:AM|PM)|Yesterday at \d{1,2}:\d{2} (?:AM|PM))", # noqa
line,
):
if current_sender and current_content:
results.append(
HumanMessage(
content="".join(current_content).strip(),
additional_kwargs={
"sender": current_sender,
"events": [{"message_time": current_timestamp}],
},
)
)
current_sender, current_timestamp = line.split(" β ")[:2]
current_content = [
line[len(current_sender) + len(current_timestamp) + 4 :].strip()
]
elif re.match(r"\[\d{1,2}:\d{2} (?:AM|PM)\]", line.strip()):
results.append(
HumanMessage(
content="".join(current_content).strip(),
additional_kwargs={
"sender": current_sender,
"events": [{"message_time": current_timestamp}],
},
)
)
current_timestamp = line.strip()[1:-1]
current_content = []
else:
current_content.append("\n" + line.strip())
if current_sender and current_content:
results.append(
HumanMessage(
content="".join(current_content).strip(),
additional_kwargs={
"sender": current_sender,
"events": [{"message_time": current_timestamp}],
},
)
)
return chat_loaders.ChatSession(messages=results)
def lazy_load(self) -> Iterator[chat_loaders.ChatSession]:
"""
Lazy load the messages from the chat file and yield them in the required format.
Yields:
A `ChatSession` object containing the loaded chat messages.
"""
yield self._load_single_chat_session_from_txt(self.path)
loader = DiscordChatLoader(
path="./discord_chats.txt",
)
from typing import List
from langchain_community.chat_loaders.base import ChatSession
from langchain_community.chat_loaders.utils import (
map_ai_messages,
merge_chat_runs,
)
raw_messages = loader.lazy_load()
merged_messages = merge_chat_runs(raw_messages)
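# Sketch: treat "talkingtower" as the AI speaker, then inspect the converted session.
messages: List[ChatSession] = list(
    map_ai_messages(merged_messages, sender="talkingtower")
)
messages[0]["messages"]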
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms.cloudflare_workersai import CloudflareWorkersAI
template = """Human: {question}
AI Assistant: """
prompt = PromptTemplate.from_template(template)
import getpass
my_account_id = getpass.getpass("Enter your Cloudflare account ID:\n\n")
my_api_token = getpass.getpass("Enter your Cloudflare API token:\n\n")
llm = CloudflareWorkersAI(account_id=my_account_id, api_token=my_api_token)
llm_chain = LLMChain(prompt=prompt, llm=llm)
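# Usage sketch (question is an assumed example):
question = "Why are roses red?"
llm_chain.run(question)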
get_ipython().system(' pip install langchain unstructured[all-docs] pydantic lxml langchainhub')
get_ipython().system(' brew install tesseract')
get_ipython().system(' brew install poppler')
path = "/Users/rlm/Desktop/Papers/LLaMA2/"
from typing import Any
from pydantic import BaseModel
from unstructured.partition.pdf import partition_pdf
raw_pdf_elements = partition_pdf(
filename=path + "LLaMA2.pdf",
extract_images_in_pdf=False,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
category_counts = {}
for element in raw_pdf_elements:
category = str(type(element))
if category in category_counts:
category_counts[category] += 1
else:
category_counts[category] = 1
unique_categories = set(category_counts.keys())
category_counts
class Element(BaseModel):
type: str
text: Any
categorized_elements = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
categorized_elements.append(Element(type="table", text=str(element)))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
categorized_elements.append(Element(type="text", text=str(element)))
table_elements = [e for e in categorized_elements if e.type == "table"]
print(len(table_elements))
text_elements = [e for e in categorized_elements if e.type == "text"]
print(len(text_elements))
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
prompt_text = """You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
tables = [i.text for i in table_elements]
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
texts = [i.text for i in text_elements]
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
import uuid
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
vectorstore = Chroma(collection_name="summaries", embedding_function=OpenAIEmbeddings())
store = InMemoryStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
doc_ids = [str(uuid.uuid4()) for _ in texts]
summary_texts = [
    Document(page_content=s, metadata={id_key: doc_ids[i]})
    for i, s in enumerate(text_summaries)
]
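# Sketch: index the summaries for retrieval while keeping the raw texts in the docstore.
retriever.vectorstore.add_documents(summary_texts)
retriever.docstore.mset(list(zip(doc_ids, texts)))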
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
import os
import uuid
uid = uuid.uuid4().hex[:6]
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = "YOUR API KEY"
from langsmith.client import Client
client = Client()
import requests
url = "https://raw.githubusercontent.com/langchain-ai/langchain/master/docs/docs/integrations/chat_loaders/example_data/langsmith_chat_dataset.json"
response = requests.get(url)
response.raise_for_status()
data = response.json()
dataset_name = f"Extraction Fine-tuning Dataset {uid}"
ds = client.create_dataset(dataset_name=dataset_name, data_type="chat")
_ = client.create_examples(
inputs=[e["inputs"] for e in data],
outputs=[e["outputs"] for e in data],
dataset_id=ds.id,
)
from langchain_community.chat_loaders.langsmith import LangSmithDatasetChatLoader
loader = LangSmithDatasetChatLoader(dataset_name=dataset_name)
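# Sketch: load the sessions and convert them to OpenAI fine-tuning format
# (the adapter import path is an assumption).
from langchain.adapters.openai import convert_messages_for_finetuning

chat_sessions = loader.lazy_load()
training_data = convert_messages_for_finetuning(chat_sessions)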
from langchain.prompts import (
ChatPromptTemplate,
FewShotChatMessagePromptTemplate,
)
examples = [
{"input": "2+2", "output": "4"},
{"input": "2+3", "output": "5"},
]
example_prompt = ChatPromptTemplate.from_messages(
[
("human", "{input}"),
("ai", "{output}"),
]
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
example_prompt=example_prompt,
examples=examples,
)
print(few_shot_prompt.format())
final_prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a wondrous wizard of math."),
few_shot_prompt,
("human", "{input}"),
]
)
from langchain_community.chat_models import ChatAnthropic
chain = final_prompt | ChatAnthropic(temperature=0.0)
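# Usage sketch (input is an assumed example):
chain.invoke({"input": "What's the square of a triangle?"})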
from getpass import getpass
KAY_API_KEY = getpass()
OPENAI_API_KEY = getpass()
import os
os.environ["KAY_API_KEY"] = KAY_API_KEY
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
from langchain.chains import ConversationalRetrievalChain
from langchain.retrievers import KayAiRetriever
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model_name="gpt-3.5-turbo")
retriever = KayAiRetriever.create(
dataset_id="company", data_types=["10-K", "10-Q"], num_contexts=6
)
qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)
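# Usage sketch (question is an assumed example): run a short conversational loop
# over the retrieved SEC filings.
questions = [
    "What are patterns in Nvidia's spend over the past three quarters?",
]
chat_history = []
for question in questions:
    result = qa({"question": question, "chat_history": chat_history})
    chat_history.append((question, result["answer"]))
    print(f"-> **Question**: {question} \n")
    print(f"**Answer**: {result['answer']} \n")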
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
documents = TextLoader("../../state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
retriever = FAISS.from_documents(texts, OpenAIEmbeddings()).as_retriever()
docs = retriever.get_relevant_documents(
"What did the president say about Ketanji Brown Jackson"
)
pretty_print_docs(docs)
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
compressor = LLMChainExtractor.from_llm(llm)
compression_retriever = ContextualCompressionRetriever(
base_compressor=compressor, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
"What did the president say about Ketanji Jackson Brown"
)
pretty_print_docs(compressed_docs)
from langchain.retrievers.document_compressors import LLMChainFilter
_filter = LLMChainFilter.from_llm(llm)
compression_retriever = ContextualCompressionRetriever(
base_compressor=_filter, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
"What did the president say about Ketanji Jackson Brown"
)
pretty_print_docs(compressed_docs)
from langchain.retrievers.document_compressors import EmbeddingsFilter
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
embeddings_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)
compression_retriever = ContextualCompressionRetriever(
base_compressor=embeddings_filter, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
"What did the president say about Ketanji Jackson Brown"
)
pretty_print_docs(compressed_docs)
from langchain.retrievers.document_compressors import DocumentCompressorPipeline
from langchain_community.document_transformers import EmbeddingsRedundantFilter
from langchain_text_splitters import CharacterTextSplitter
splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=0, separator=". ")
redundant_filter = EmbeddingsRedundantFilter(embeddings=embeddings)
relevant_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.76)
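# Sketch: chain splitter, redundancy filter, and relevance filter into one compressor pipeline.
pipeline_compressor = DocumentCompressorPipeline(
    transformers=[splitter, redundant_filter, relevant_filter]
)
compression_retriever = ContextualCompressionRetriever(
    base_compressor=pipeline_compressor, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
    "What did the president say about Ketanji Jackson Brown"
)
pretty_print_docs(compressed_docs)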
from langchain_community.document_loaders.obs_file import OBSFileLoader
endpoint = "your-endpoint"
from obs import ObsClient
obs_client = ObsClient(
access_key_id="your-access-key",
secret_access_key="your-secret-key",
server=endpoint,
)
loader = OBSFileLoader("your-bucket-name", "your-object-key", client=obs_client)
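# Usage sketch (bucket and key above are placeholders):
loader.load()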
from langchain.chains import RetrievalQAWithSourcesChain
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores.jaguar import Jaguar
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
"""
Load a text file into a set of documents
"""
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=300)
docs = text_splitter.split_documents(documents)
"""
Instantiate a Jaguar vector store
"""
url = "http://192.168.5.88:8080/fwww/"
embeddings = OpenAIEmbeddings()
pod = "vdb"
store = "langchain_rag_store"
vector_index = "v"
vector_type = "cosine_fraction_float"
vector_dimension = 1536
vectorstore = Jaguar(
pod, store, vector_index, vector_type, vector_dimension, url, embeddings
)
"""
Login must be performed to authorize the client.
The environment variable JAGUAR_API_KEY or file $HOME/.jagrc
should contain the API key for accessing JaguarDB servers.
"""
vectorstore.login()
"""
Create vector store on the JaguarDB database server.
This should be done only once.
"""
metadata = "category char(16)"
text_size = 4096
vectorstore.create(metadata, text_size)
"""
Add the texts from the text splitter to our vectorstore
"""
vectorstore.add_documents(docs)
""" Get the retriever object """
retriever = vectorstore.as_retriever()
template = """You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.
Question: {question}
Context: {context}
Answer:
"""
prompt = ChatPromptTemplate.from_template(template)
""" Obtain a Large Language Model """
LLM = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
""" Create a chain for the RAG flow """
rag_chain = (
{"context": retriever, "question": RunnablePassthrough()}
| prompt
| LLM
    | StrOutputParser()
)
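# Usage sketch (question is an assumed example): run the RAG chain end to end.
resp = rag_chain.invoke("What did the president say about Justice Breyer?")
print(resp)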
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langsmith langchainhub --quiet')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai tiktoken pandas duckduckgo-search --quiet')
import os
from uuid import uuid4
unique_id = uuid4().hex[0:8]
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = f"Tracing Walkthrough - {unique_id}"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_API_KEY"] = "<YOUR-API-KEY>" # Update to your API key
os.environ["OPENAI_API_KEY"] = "<YOUR-OPENAI-API-KEY>"
from langsmith import Client
client = Client()
from langchain import hub
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
from langchain_community.tools import DuckDuckGoSearchResults
from langchain_openai import ChatOpenAI
prompt = hub.pull("wfh/langsmith-agent-prompt:5d466cbc")
llm = ChatOpenAI(
model="gpt-3.5-turbo-16k",
temperature=0,
)
tools = [
DuckDuckGoSearchResults(
name="duck_duck_go"
), # General internet search using DuckDuckGo
]
llm_with_tools = llm.bind_tools(tools)
runnable_agent = (
{
"input": lambda x: x["input"],
"agent_scratchpad": lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
),
}
| prompt
| llm_with_tools
| OpenAIToolsAgentOutputParser()
)
agent_executor = AgentExecutor(
agent=runnable_agent, tools=tools, handle_parsing_errors=True
)
inputs = [
"What is LangChain?",
"What's LangSmith?",
"When was Llama-v2 released?",
"What is the langsmith cookbook?",
"When did langchain first announce the hub?",
]
results = agent_executor.batch([{"input": x} for x in inputs], return_exceptions=True)
results[:2]
outputs = [
"LangChain is an open-source framework for building applications using large language models. It is also the name of the company building LangSmith.",
"LangSmith is a unified platform for debugging, testing, and monitoring language model applications and agents powered by LangChain",
"July 18, 2023",
"The langsmith cookbook is a github repository containing detailed examples of how to use LangSmith to debug, evaluate, and monitor large language model-powered applications.",
"September 5, 2023",
]
dataset_name = f"agent-qa-{unique_id}"
dataset = client.create_dataset(
dataset_name,
description="An example dataset of questions over the LangSmith documentation.",
)
client.create_examples(
inputs=[{"input": query} for query in inputs],
outputs=[{"output": answer} for answer in outputs],
dataset_id=dataset.id,
)
from langchain import hub
from langchain.agents import AgentExecutor, AgentType, initialize_agent, load_tools
from langchain_openai import ChatOpenAI
def create_agent(prompt, llm_with_tools):
runnable_agent = (
{
"input": lambda x: x["input"],
"agent_scratchpad": lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
),
}
| prompt
| llm_with_tools
        | OpenAIToolsAgentOutputParser()
    )
    return AgentExecutor(
        agent=runnable_agent, tools=tools, handle_parsing_errors=True
    )