id | text | source |
---|---|---|
05c651df0567-86 | (langchain.llms.ForefrontAI attribute)
(langchain.llms.GooglePalm attribute)
(langchain.llms.GooseAI attribute)
(langchain.llms.LlamaCpp attribute)
(langchain.llms.NLPCloud attribute)
(langchain.llms.OpenAI attribute)
(langchain.llms.OpenLM attribute)
(langchain.llms.Petals attribute)
(langchain.llms.PredictionGuard attribute)
(langchain.llms.RWKV attribute)
(langchain.llms.VertexAI attribute)
(langchain.llms.Writer attribute)
template (langchain.prompts.PromptTemplate attribute)
(langchain.tools.QueryPowerBITool attribute)
template_format (langchain.prompts.FewShotPromptTemplate attribute)
(langchain.prompts.FewShotPromptWithTemplates attribute)
(langchain.prompts.PromptTemplate attribute)
template_tool_response (langchain.agents.ConversationalChatAgent attribute)
text_length (langchain.chains.LLMRequestsChain attribute)
text_splitter (langchain.chains.AnalyzeDocumentChain attribute)
(langchain.chains.MapReduceChain attribute)
(langchain.chains.QAGenerationChain attribute)
TextLoader (class in langchain.document_loaders)
texts (langchain.retrievers.KNNRetriever attribute)
(langchain.retrievers.SVMRetriever attribute)
TextSplitter (class in langchain.text_splitter)
tfidf_array (langchain.retrievers.TFIDFRetriever attribute)
time (langchain.utilities.DuckDuckGoSearchAPIWrapper attribute)
to_typescript() (langchain.tools.APIOperation method)
token (langchain.utilities.PowerBIDataset attribute)
token_path (langchain.document_loaders.GoogleApiClient attribute)
(langchain.document_loaders.GoogleDriveLoader attribute)
tokenizer (langchain.llms.Petals attribute) | https://python.langchain.com/en/latest/genindex.html |
05c651df0567-87 | tokenizer (langchain.llms.Petals attribute)
tokens (langchain.llms.AlephAlpha attribute)
tokens_path (langchain.llms.RWKV attribute)
TokenTextSplitter (class in langchain.text_splitter)
ToMarkdownLoader (class in langchain.document_loaders)
TomlLoader (class in langchain.document_loaders)
tool() (in module langchain.agents)
(in module langchain.tools)
tool_run_logging_kwargs() (langchain.agents.Agent method)
(langchain.agents.BaseMultiActionAgent method)
(langchain.agents.BaseSingleActionAgent method)
(langchain.agents.LLMSingleActionAgent method)
tools (langchain.agents.agent_toolkits.JiraToolkit attribute)
(langchain.agents.agent_toolkits.ZapierToolkit attribute)
(langchain.agents.AgentExecutor attribute)
top_k (langchain.chains.SQLDatabaseChain attribute)
(langchain.chat_models.ChatGooglePalm attribute)
(langchain.llms.AlephAlpha attribute)
(langchain.llms.Anthropic attribute)
(langchain.llms.ForefrontAI attribute)
(langchain.llms.GooglePalm attribute)
(langchain.llms.GPT4All attribute)
(langchain.llms.LlamaCpp attribute)
(langchain.llms.NLPCloud attribute)
(langchain.llms.Petals attribute)
(langchain.llms.VertexAI attribute)
(langchain.retrievers.ChatGPTPluginRetriever attribute)
(langchain.retrievers.DataberryRetriever attribute)
(langchain.retrievers.PineconeHybridSearchRetriever attribute)
top_k_docs_for_context (langchain.chains.ChatVectorDBChain attribute)
top_k_results (langchain.utilities.ArxivAPIWrapper attribute)
(langchain.utilities.GooglePlacesAPIWrapper attribute)
(langchain.utilities.WikipediaAPIWrapper attribute) | https://python.langchain.com/en/latest/genindex.html |
05c651df0567-88 | (langchain.utilities.GooglePlacesAPIWrapper attribute)
(langchain.utilities.WikipediaAPIWrapper attribute)
top_n (langchain.retrievers.document_compressors.CohereRerank attribute)
top_p (langchain.chat_models.ChatGooglePalm attribute)
(langchain.llms.AlephAlpha attribute)
(langchain.llms.Anthropic attribute)
(langchain.llms.AzureOpenAI attribute)
(langchain.llms.ForefrontAI attribute)
(langchain.llms.GooglePalm attribute)
(langchain.llms.GooseAI attribute)
(langchain.llms.GPT4All attribute)
(langchain.llms.LlamaCpp attribute)
(langchain.llms.NLPCloud attribute)
(langchain.llms.OpenAI attribute)
(langchain.llms.OpenLM attribute)
(langchain.llms.Petals attribute)
(langchain.llms.RWKV attribute)
(langchain.llms.VertexAI attribute)
(langchain.llms.Writer attribute)
topP (langchain.llms.AI21 attribute)
traits (langchain.experimental.GenerativeAgent attribute)
transform (langchain.chains.TransformChain attribute)
transform_documents() (langchain.document_transformers.EmbeddingsRedundantFilter method)
(langchain.text_splitter.TextSplitter method)
transform_input_fn (langchain.llms.Databricks attribute)
transform_output_fn (langchain.llms.Databricks attribute)
transformers (langchain.retrievers.document_compressors.DocumentCompressorPipeline attribute)
truncate (langchain.embeddings.CohereEmbeddings attribute)
(langchain.llms.Cohere attribute)
ts_type_from_python() (langchain.tools.APIOperation static method)
ttl (langchain.memory.RedisEntityStore attribute)
tuned_model_name (langchain.llms.VertexAI attribute)
TwitterTweetLoader (class in langchain.document_loaders)
type (langchain.utilities.GoogleSerperAPIWrapper attribute) | https://python.langchain.com/en/latest/genindex.html |
05c651df0567-89 | type (langchain.utilities.GoogleSerperAPIWrapper attribute)
Typesense (class in langchain.vectorstores)
U
unsecure (langchain.utilities.searx_search.SearxSearchWrapper attribute)
(langchain.utilities.SearxSearchWrapper attribute)
UnstructuredAPIFileIOLoader (class in langchain.document_loaders)
UnstructuredAPIFileLoader (class in langchain.document_loaders)
UnstructuredEmailLoader (class in langchain.document_loaders)
UnstructuredEPubLoader (class in langchain.document_loaders)
UnstructuredFileIOLoader (class in langchain.document_loaders)
UnstructuredFileLoader (class in langchain.document_loaders)
UnstructuredHTMLLoader (class in langchain.document_loaders)
UnstructuredImageLoader (class in langchain.document_loaders)
UnstructuredMarkdownLoader (class in langchain.document_loaders)
UnstructuredODTLoader (class in langchain.document_loaders)
UnstructuredPDFLoader (class in langchain.document_loaders)
UnstructuredPowerPointLoader (class in langchain.document_loaders)
UnstructuredRTFLoader (class in langchain.document_loaders)
UnstructuredURLLoader (class in langchain.document_loaders)
UnstructuredWordDocumentLoader (class in langchain.document_loaders)
update_document() (langchain.vectorstores.Chroma method)
update_forward_refs() (langchain.llms.AI21 class method)
(langchain.llms.AlephAlpha class method)
(langchain.llms.Anthropic class method)
(langchain.llms.Anyscale class method)
(langchain.llms.AzureOpenAI class method)
(langchain.llms.Banana class method)
(langchain.llms.Beam class method)
(langchain.llms.CerebriumAI class method)
(langchain.llms.Cohere class method) | https://python.langchain.com/en/latest/genindex.html |
05c651df0567-90 | (langchain.llms.Cohere class method)
(langchain.llms.CTransformers class method)
(langchain.llms.Databricks class method)
(langchain.llms.DeepInfra class method)
(langchain.llms.FakeListLLM class method)
(langchain.llms.ForefrontAI class method)
(langchain.llms.GooglePalm class method)
(langchain.llms.GooseAI class method)
(langchain.llms.GPT4All class method)
(langchain.llms.HuggingFaceEndpoint class method)
(langchain.llms.HuggingFaceHub class method)
(langchain.llms.HuggingFacePipeline class method)
(langchain.llms.HuggingFaceTextGenInference class method)
(langchain.llms.HumanInputLLM class method)
(langchain.llms.LlamaCpp class method)
(langchain.llms.Modal class method)
(langchain.llms.MosaicML class method)
(langchain.llms.NLPCloud class method)
(langchain.llms.OpenAI class method)
(langchain.llms.OpenAIChat class method)
(langchain.llms.OpenLM class method)
(langchain.llms.Petals class method)
(langchain.llms.PipelineAI class method)
(langchain.llms.PredictionGuard class method)
(langchain.llms.PromptLayerOpenAI class method)
(langchain.llms.PromptLayerOpenAIChat class method)
(langchain.llms.Replicate class method)
(langchain.llms.RWKV class method)
(langchain.llms.SagemakerEndpoint class method)
(langchain.llms.SelfHostedHuggingFaceLLM class method)
(langchain.llms.SelfHostedPipeline class method)
(langchain.llms.StochasticAI class method)
(langchain.llms.VertexAI class method)
(langchain.llms.Writer class method) | https://python.langchain.com/en/latest/genindex.html |
05c651df0567-91 | (langchain.llms.VertexAI class method)
(langchain.llms.Writer class method)
upsert_messages() (langchain.memory.CosmosDBChatMessageHistory method)
url (langchain.document_loaders.MathpixPDFLoader property)
(langchain.llms.Beam attribute)
(langchain.retrievers.ChatGPTPluginRetriever attribute)
(langchain.retrievers.RemoteLangChainRetriever attribute)
(langchain.tools.IFTTTWebhook attribute)
urls (langchain.document_loaders.PlaywrightURLLoader attribute)
(langchain.document_loaders.SeleniumURLLoader attribute)
use_mlock (langchain.embeddings.LlamaCppEmbeddings attribute)
(langchain.llms.GPT4All attribute)
(langchain.llms.LlamaCpp attribute)
use_mmap (langchain.llms.LlamaCpp attribute)
use_multiplicative_presence_penalty (langchain.llms.AlephAlpha attribute)
use_query_checker (langchain.chains.SQLDatabaseChain attribute)
username (langchain.vectorstores.MyScaleSettings attribute)
V
validate_channel_or_videoIds_is_set() (langchain.document_loaders.GoogleApiClient class method)
(langchain.document_loaders.GoogleApiYoutubeLoader class method)
validate_init_args() (langchain.document_loaders.ConfluenceLoader static method)
validate_template (langchain.prompts.FewShotPromptTemplate attribute)
(langchain.prompts.FewShotPromptWithTemplates attribute)
(langchain.prompts.PromptTemplate attribute)
Vectara (class in langchain.vectorstores)
vectorizer (langchain.retrievers.TFIDFRetriever attribute)
VectorStore (class in langchain.vectorstores)
vectorstore (langchain.agents.agent_toolkits.VectorStoreInfo attribute)
(langchain.chains.ChatVectorDBChain attribute)
(langchain.chains.VectorDBQA attribute)
(langchain.chains.VectorDBQAWithSourcesChain attribute) | https://python.langchain.com/en/latest/genindex.html |
05c651df0567-92 | (langchain.chains.VectorDBQAWithSourcesChain attribute)
(langchain.prompts.example_selector.SemanticSimilarityExampleSelector attribute)
(langchain.retrievers.SelfQueryRetriever attribute)
(langchain.retrievers.TimeWeightedVectorStoreRetriever attribute)
vectorstore_info (langchain.agents.agent_toolkits.VectorStoreToolkit attribute)
vectorstores (langchain.agents.agent_toolkits.VectorStoreRouterToolkit attribute)
verbose (langchain.llms.AI21 attribute)
(langchain.llms.AlephAlpha attribute)
(langchain.llms.Anthropic attribute)
(langchain.llms.Anyscale attribute)
(langchain.llms.AzureOpenAI attribute)
(langchain.llms.Banana attribute)
(langchain.llms.Beam attribute)
(langchain.llms.CerebriumAI attribute)
(langchain.llms.Cohere attribute)
(langchain.llms.CTransformers attribute)
(langchain.llms.Databricks attribute)
(langchain.llms.DeepInfra attribute)
(langchain.llms.FakeListLLM attribute)
(langchain.llms.ForefrontAI attribute)
(langchain.llms.GooglePalm attribute)
(langchain.llms.GooseAI attribute)
(langchain.llms.GPT4All attribute)
(langchain.llms.HuggingFaceEndpoint attribute)
(langchain.llms.HuggingFaceHub attribute)
(langchain.llms.HuggingFacePipeline attribute)
(langchain.llms.HuggingFaceTextGenInference attribute)
(langchain.llms.HumanInputLLM attribute)
(langchain.llms.LlamaCpp attribute)
(langchain.llms.Modal attribute)
(langchain.llms.MosaicML attribute)
(langchain.llms.NLPCloud attribute)
(langchain.llms.OpenAI attribute)
(langchain.llms.OpenAIChat attribute)
(langchain.llms.OpenLM attribute) | https://python.langchain.com/en/latest/genindex.html |
05c651df0567-93 | (langchain.llms.OpenAIChat attribute)
(langchain.llms.OpenLM attribute)
(langchain.llms.Petals attribute)
(langchain.llms.PipelineAI attribute)
(langchain.llms.PredictionGuard attribute)
(langchain.llms.Replicate attribute)
(langchain.llms.RWKV attribute)
(langchain.llms.SagemakerEndpoint attribute)
(langchain.llms.SelfHostedHuggingFaceLLM attribute)
(langchain.llms.SelfHostedPipeline attribute)
(langchain.llms.StochasticAI attribute)
(langchain.llms.VertexAI attribute)
(langchain.llms.Writer attribute)
(langchain.retrievers.SelfQueryRetriever attribute)
(langchain.tools.BaseTool attribute)
(langchain.tools.Tool attribute)
VespaRetriever (class in langchain.retrievers)
video_ids (langchain.document_loaders.GoogleApiYoutubeLoader attribute)
visible_only (langchain.tools.ClickTool attribute)
vocab_only (langchain.embeddings.LlamaCppEmbeddings attribute)
(langchain.llms.GPT4All attribute)
(langchain.llms.LlamaCpp attribute)
W
wait_for_processing() (langchain.document_loaders.MathpixPDFLoader method)
WeatherDataLoader (class in langchain.document_loaders)
Weaviate (class in langchain.vectorstores)
WeaviateHybridSearchRetriever (class in langchain.retrievers)
WeaviateHybridSearchRetriever.Config (class in langchain.retrievers)
web_path (langchain.document_loaders.WebBaseLoader property)
web_paths (langchain.document_loaders.WebBaseLoader attribute)
WebBaseLoader (class in langchain.document_loaders)
WhatsAppChatLoader (class in langchain.document_loaders)
Wikipedia (class in langchain.docstore) | https://python.langchain.com/en/latest/genindex.html |
05c651df0567-94 | Wikipedia (class in langchain.docstore)
WikipediaLoader (class in langchain.document_loaders)
wolfram_alpha_appid (langchain.utilities.WolframAlphaAPIWrapper attribute)
writer_api_key (langchain.llms.Writer attribute)
writer_org_id (langchain.llms.Writer attribute)
Y
YoutubeLoader (class in langchain.document_loaders)
Z
zapier_description (langchain.tools.ZapierNLARunAction attribute)
ZepRetriever (class in langchain.retrievers)
ZERO_SHOT_REACT_DESCRIPTION (langchain.agents.AgentType attribute)
Zilliz (class in langchain.vectorstores) | https://python.langchain.com/en/latest/genindex.html |
a2389755f3d1-0 |
Memory
Memory#
Note
Conceptual Guide
By default, Chains and Agents are stateless,
meaning that they treat each incoming query independently (as are the underlying LLMs and chat models).
In some applications (chatbots being a GREAT example) it is highly important to remember previous interactions, at both a short-term and a long-term level.
The concept of “Memory” exists to do exactly that.
LangChain provides memory components in two forms.
First, LangChain provides helper utilities for managing and manipulating previous chat messages.
These are designed to be modular and useful regardless of how they are used.
Second, LangChain provides easy ways to incorporate these utilities into chains.
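For example, here is a minimal sketch of both forms (the same classes are covered in detail in the Getting Started notebook below):
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
# Standalone utility: a lightweight container for chat messages
history = ChatMessageHistory()
history.add_user_message("hi!")
history.add_ai_message("whats up?")
# Chain-ready wrapper: exposes the same history as a memory variable for chains
memory = ConversationBufferMemory()
memory.chat_memory.add_user_message("hi!")
memory.chat_memory.add_ai_message("whats up?")
memory.load_memory_variables({})  # {'history': 'Human: hi!\nAI: whats up?'}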
The following sections of documentation are provided:
Getting Started: An overview of how to get started with different types of memory.
How-To Guides: A collection of how-to guides. These highlight different types of memory, as well as how to use memory in chains.
Memory
Getting Started
How-To Guides | https://python.langchain.com/en/latest/modules/memory.html |
cea206716634-0 |
Indexes
Contents
Go Deeper
Indexes#
Note
Conceptual Guide
Indexes refer to ways to structure documents so that LLMs can best interact with them.
This module contains utility functions for working with documents, different types of indexes, and then examples for using those indexes in chains.
The most common way that indexes are used in chains is in a “retrieval” step.
This step refers to taking a user’s query and returning the most relevant documents.
We draw this distinction because (1) an index can be used for other things besides retrieval, and (2) retrieval can use other logic besides an index to find relevant documents.
We therefore have a concept of a “Retriever” interface - this is the interface that most chains work with.
Most of the time when we talk about indexes and retrieval we are talking about indexing and retrieving unstructured data (like text documents).
For interacting with structured data (SQL tables, etc) or APIs, please see the corresponding use case sections for links to relevant functionality.
The primary index and retrieval types supported by LangChain are currently centered around vector databases, and therefore much of this section dives deep into those topics.
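As a rough, illustrative sketch of how a typical indexing-and-retrieval pipeline fits together (the OpenAI embeddings, the FAISS vectorstore, and the state_of_the_union.txt file are arbitrary choices for this example; an OpenAI API key and the faiss package are assumed):
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
# Load -> split -> embed -> index, then expose the index as a Retriever
documents = TextLoader("state_of_the_union.txt").load()
chunks = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(documents)
db = FAISS.from_documents(chunks, OpenAIEmbeddings())
retriever = db.as_retriever()
retriever.get_relevant_documents("What did the president say about the economy?")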
For an overview of everything related to this, please see the below notebook for getting started:
Getting Started
We then provide a deep dive on the four main components.
Document Loaders
How to load documents from a variety of sources.
Text Splitters
An overview of the abstractions and implementations around splitting text.
VectorStores
An overview of VectorStores and the many integrations LangChain provides.
Retrievers
An overview of Retrievers and the implementations LangChain provides.
Go Deeper#
Document Loaders
Text Splitters
Vectorstores
Retrievers | https://python.langchain.com/en/latest/modules/indexes.html |
e4a09180d245-0 |
Prompts
Contents
Getting Started
Go Deeper
Prompts#
Note
Conceptual Guide
The new way of programming models is through prompts.
A “prompt” refers to the input to the model.
This input is rarely hard coded, but rather is often constructed from multiple components.
A PromptTemplate is responsible for the construction of this input.
LangChain provides several classes and functions to make constructing and working with prompts easy.
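For example, a minimal PromptTemplate looks like this (a sketch of the most common usage):
from langchain.prompts import PromptTemplate
prompt = PromptTemplate(
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
)
prompt.format(product="colorful socks")
# -> 'What is a good name for a company that makes colorful socks?'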
This section of documentation is split into four sections:
LLM Prompt Templates
How to use PromptTemplates to prompt Language Models.
Chat Prompt Templates
How to use PromptTemplates to prompt Chat Models.
Example Selectors
It is often useful to include examples in prompts.
These examples can be hardcoded, but it is often more powerful if they are dynamically selected.
This section goes over example selection.
Output Parsers
Language models (and Chat Models) output text.
But many times you may want to get more structured information than just text back.
This is where output parsers come in.
Output Parsers are responsible for (1) instructing the model how its output should be formatted, and (2) parsing that output into the desired format (including retrying if necessary).
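As a small illustration, here is a sketch using the comma-separated-list parser (the prompt wording and the sample model output are arbitrary examples):
from langchain.output_parsers import CommaSeparatedListOutputParser
from langchain.prompts import PromptTemplate
parser = CommaSeparatedListOutputParser()
prompt = PromptTemplate(
    template="List five {subject}.\n{format_instructions}",
    input_variables=["subject"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
# After sending prompt.format(subject="ice cream flavors") to a model,
# the raw text it returns can be parsed back into a Python list:
parser.parse("vanilla, chocolate, strawberry, mint, pistachio")
# -> ['vanilla', 'chocolate', 'strawberry', 'mint', 'pistachio']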
Getting Started#
Getting Started
Go Deeper#
Prompt Templates
Chat Prompt Template
Example Selectors
Output Parsers | https://python.langchain.com/en/latest/modules/prompts.html |
45510dc56aae-0 |
Models
Contents
Getting Started
Go Deeper
Models#
Note
Conceptual Guide
This section of the documentation deals with different types of models that are used in LangChain.
On this page we will go over the model types at a high level,
but we have individual pages for each model type.
The pages contain more detailed “how-to” guides for working with that model,
as well as a list of different model providers.
LLMs
Large Language Models (LLMs) are the first type of models we cover.
These models take a text string as input, and return a text string as output.
Chat Models
Chat Models are the second type of models we cover.
These models are usually backed by a language model, but their APIs are more structured.
Specifically, these models take a list of Chat Messages as input, and return a Chat Message.
Text Embedding Models
The third type of models we cover are text embedding models.
These models take text as input and return a list of floats.
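The three interfaces side by side, as a sketch (the OpenAI-backed implementations are arbitrary examples, and an API key is assumed):
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import HumanMessage
llm = OpenAI()                           # text string in -> text string out
llm("Tell me a joke")
chat = ChatOpenAI()                      # list of chat messages in -> chat message out
chat([HumanMessage(content="Tell me a joke")])
embeddings = OpenAIEmbeddings()          # text in -> list of floats out
embeddings.embed_query("Tell me a joke")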
Getting Started#
Getting Started
Go Deeper#
LLMs
Chat Models
Text Embedding Models | https://python.langchain.com/en/latest/modules/models.html |
3a0893d10d97-0 |
Chains
Chains#
Note
Conceptual Guide
Using an LLM in isolation is fine for some simple applications,
but many more complex ones require chaining LLMs - either with each other or with other experts.
LangChain provides a standard interface for Chains, as well as some common implementations of chains for ease of use.
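The simplest chain, an LLMChain, combines a prompt template with a model; here is a minimal sketch (assuming an OpenAI API key, with an arbitrary prompt):
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
)
# Format the prompt with the input, call the model, and return the text output
chain = LLMChain(llm=llm, prompt=prompt)
chain.run("colorful socks")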
The following sections of documentation are provided:
Getting Started: A getting started guide for chains, to get you up and running quickly.
How-To Guides: A collection of how-to guides. These highlight how to use various types of chains.
Reference: API reference documentation for all Chain classes. | https://python.langchain.com/en/latest/modules/chains.html |
b3edfc139726-0 |
Agents
Contents
Action Agents
Plan-and-Execute Agents
Agents#
Note
Conceptual Guide
Some applications will require not just a predetermined chain of calls to LLMs/other tools,
but potentially an unknown chain that depends on the user’s input.
In these types of chains, there is an “agent” which has access to a suite of tools.
Depending on the user input, the agent can then decide which, if any, of these tools to call.
At the moment, there are two main types of agents:
“Action Agents”: these agents decide an action to take and take that action one step at a time
“Plan-and-Execute Agents”: these agents first decide a plan of actions to take, and then execute those actions one at a time.
When should you use each one? Action Agents are more conventional, and good for small tasks.
For more complex or long-running tasks, the initial planning step helps to maintain long-term objectives and focus. However, that comes at the expense of generally more calls and higher latency.
These two agents are also not mutually exclusive - in fact, it is often best to have an Action Agent be in charge of the execution for the Plan and Execute agent.
Action Agents#
High-level pseudocode of an Action Agent looks something like the following (a rough code sketch follows the list):
Some user input is received
The agent decides which tool - if any - to use, and what the input to that tool should be
That tool is then called with that tool input, and an observation is recorded (this is just the output of calling that tool with that tool input)
That history of tool, tool input, and observation is passed back into the agent, and it decides what step to take next
This is repeated until the agent decides it no longer needs to use a tool, and then it responds directly to the user.
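Expressed as purely illustrative pseudocode (the names below are schematic, not actual LangChain APIs), the loop looks roughly like this:
# Illustrative pseudocode only, not the real AgentExecutor implementation
intermediate_steps = []
while True:
    decision = agent.plan(user_input, intermediate_steps)
    if isinstance(decision, AgentFinish):
        break                                    # respond directly to the user
    tool = tools[decision.tool]                  # AgentAction: the chosen tool...
    observation = tool.run(decision.tool_input)  # ...called with the chosen input
    intermediate_steps.append((decision, observation))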
The different abstractions involved in agents are as follows: | https://python.langchain.com/en/latest/modules/agents.html |
b3edfc139726-1 | The different abstractions involved in agents are as follows:
Agent: this is where the logic of the application lives. Agents expose an interface that takes in user input along with a list of previous steps the agent has taken, and returns either an AgentAction or AgentFinish
AgentAction corresponds to the tool to use and the input to that tool
AgentFinish means the agent is done, and has information around what to return to the user
Tools: these are the actions an agent can take. What tools you give an agent depends heavily on what you want the agent to do.
Toolkits: these are groups of tools designed for a specific use case. For example, in order for an agent to interact with a SQL database in the best way it may need access to one tool to execute queries and another tool to inspect tables.
Agent Executor: this wraps an agent and a list of tools. It is responsible for the loop of running the agent iteratively until a stopping criterion is met.
The most important abstraction of the four above to understand is that of the agent.
Although an agent can be defined in whatever way one chooses, the typical way to construct an agent is with the following components (an end-to-end example follows the list):
PromptTemplate: this is responsible for taking the user input and previous steps and constructing a prompt to send to the language model
Language Model: this takes the prompt constructed by the PromptTemplate and returns some output
Output Parser: this takes the output of the Language Model and parses it into an AgentAction or AgentFinish object.
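Putting these pieces together, the high-level initialize_agent helper builds an agent and its executor in one call. A sketch (the tool names and question are arbitrary, and the serpapi tool needs its own API key):
from langchain.agents import initialize_agent, load_tools, AgentType
from langchain.llms import OpenAI
llm = OpenAI(temperature=0)
tools = load_tools(["serpapi", "llm-math"], llm=llm)
agent_executor = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent_executor.run("What was the high temperature in SF yesterday? What is that number raised to the .023 power?")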
In this section of documentation, we first start with a Getting Started notebook to cover how to use all things related to agents in an end-to-end manner.
We then split the documentation into the following sections:
Tools
In this section we cover the different types of tools LangChain supports natively.
We then cover how to add your own tools.
Agents
In this section we cover the different types of agents LangChain supports natively. | https://python.langchain.com/en/latest/modules/agents.html |
b3edfc139726-2 | Agents
In this section we cover the different types of agents LangChain supports natively.
We then cover how to modify and create your own agents.
Toolkits
In this section we go over the various toolkits that LangChain supports out of the box,
and how to create an agent from them.
Agent Executor
In this section we go over the Agent Executor class, which is responsible for calling
the agent and tools in a loop. We go over different ways to customize this, and options you
can use for more control.
Go Deeper
Tools
Agents
Toolkits
Agent Executors
Plan-and-Execute Agents#
High-level pseudocode of a Plan-and-Execute Agent looks something like:
Some user input is received
The planner lists out the steps to take
The executor goes through the list of steps, executing them
The most typical implementation is to have the planner be a language model,
and the executor be an action agent.
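A sketch of that typical implementation using the experimental plan-and-execute module (the module path, helper names, tool choice, and question here are illustrative and may change):
from langchain.chat_models import ChatOpenAI
from langchain.agents import load_tools
from langchain.experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner
model = ChatOpenAI(temperature=0)
tools = load_tools(["llm-math"], llm=model)                  # an arbitrary single tool
planner = load_chat_planner(model)                           # the planner is a language model
executor = load_agent_executor(model, tools, verbose=True)   # the executor is an action agent
agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)
agent.run("What is 2 raised to the 0.5 power, rounded to three decimal places?")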
Go Deeper
Plan and Execute
Imports
Tools
Planner, Executor, and Agent
Run Example | https://python.langchain.com/en/latest/modules/agents.html |
7d9a7450f354-0 |
Getting Started
Contents
ChatMessageHistory
ConversationBufferMemory
Using in a chain
Saving Message History
Getting Started#
This notebook walks through how LangChain thinks about memory.
Memory involves keeping a concept of state around throughout a user’s interactions with a language model. A user’s interactions with a language model are captured in the concept of ChatMessages, so this boils down to ingesting, capturing, transforming and extracting knowledge from a sequence of chat messages. There are many different ways to do this, each of which exists as its own memory type.
In general, there are two ways of using each type of memory: the standalone functions, which extract information from a sequence of messages, and the way that memory type can be used within a chain.
Memory can return multiple pieces of information (for example, the most recent N messages and a summary of all previous messages). The returned information can either be a string or a list of messages.
In this notebook, we will walk through the simplest form of memory: “buffer” memory, which just involves keeping a buffer of all prior messages. We will show how to use the modular utility functions here, then show how it can be used in a chain (both returning a string as well as a list of messages).
ChatMessageHistory#
One of the core utility classes underpinning most (if not all) memory modules is the ChatMessageHistory class. This is a super lightweight wrapper which exposes convenience methods for saving Human messages, AI messages, and then fetching them all.
You may want to use this class directly if you are managing memory outside of a chain.
from langchain.memory import ChatMessageHistory
history = ChatMessageHistory()
history.add_user_message("hi!")
history.add_ai_message("whats up?")
history.messages
[HumanMessage(content='hi!', additional_kwargs={}), | https://python.langchain.com/en/latest/modules/memory/getting_started.html |
7d9a7450f354-1 | history.messages
[HumanMessage(content='hi!', additional_kwargs={}),
AIMessage(content='whats up?', additional_kwargs={})]
ConversationBufferMemory#
We now show how to use this simple concept in a chain. We first showcase ConversationBufferMemory, which is just a wrapper around ChatMessageHistory that exposes the messages in a variable.
We can first extract it as a string.
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory()
memory.chat_memory.add_user_message("hi!")
memory.chat_memory.add_ai_message("whats up?")
memory.load_memory_variables({})
{'history': 'Human: hi!\nAI: whats up?'}
We can also get the history as a list of messages
memory = ConversationBufferMemory(return_messages=True)
memory.chat_memory.add_user_message("hi!")
memory.chat_memory.add_ai_message("whats up?")
memory.load_memory_variables({})
{'history': [HumanMessage(content='hi!', additional_kwargs={}),
AIMessage(content='whats up?', additional_kwargs={})]}
Using in a chain#
Finally, let’s take a look at using this in a chain (setting verbose=True so we can see the prompt).
from langchain.llms import OpenAI
from langchain.chains import ConversationChain
llm = OpenAI(temperature=0)
conversation = ConversationChain(
llm=llm,
verbose=True,
memory=ConversationBufferMemory()
)
conversation.predict(input="Hi there!")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi there!
AI: | https://python.langchain.com/en/latest/modules/memory/getting_started.html |
7d9a7450f354-2 | Current conversation:
Human: Hi there!
AI:
> Finished chain.
" Hi there! It's nice to meet you. How can I help you today?"
conversation.predict(input="I'm doing well! Just having a conversation with an AI.")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi there!
AI: Hi there! It's nice to meet you. How can I help you today?
Human: I'm doing well! Just having a conversation with an AI.
AI:
> Finished chain.
" That's great! It's always nice to have a conversation with someone new. What would you like to talk about?"
conversation.predict(input="Tell me about yourself.")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi there!
AI: Hi there! It's nice to meet you. How can I help you today?
Human: I'm doing well! Just having a conversation with an AI.
AI: That's great! It's always nice to have a conversation with someone new. What would you like to talk about?
Human: Tell me about yourself.
AI:
> Finished chain. | https://python.langchain.com/en/latest/modules/memory/getting_started.html |
7d9a7450f354-3 | Human: Tell me about yourself.
AI:
> Finished chain.
" Sure! I'm an AI created to help people with their everyday tasks. I'm programmed to understand natural language and provide helpful information. I'm also constantly learning and updating my knowledge base so I can provide more accurate and helpful answers."
Saving Message History#
You may often have to save messages and then load them to use again. This can be done by first converting the messages to normal Python dictionaries, saving those (as JSON, for example), and then loading them. Here is an example of doing that.
import json
from langchain.memory import ChatMessageHistory
from langchain.schema import messages_from_dict, messages_to_dict
history = ChatMessageHistory()
history.add_user_message("hi!")
history.add_ai_message("whats up?")
dicts = messages_to_dict(history.messages)
dicts
[{'type': 'human', 'data': {'content': 'hi!', 'additional_kwargs': {}}},
{'type': 'ai', 'data': {'content': 'whats up?', 'additional_kwargs': {}}}]
new_messages = messages_from_dict(dicts)
new_messages
[HumanMessage(content='hi!', additional_kwargs={}),
AIMessage(content='whats up?', additional_kwargs={})]
And that’s it for the getting started! There are plenty of different types of memory, check out our examples to see them all | https://python.langchain.com/en/latest/modules/memory/getting_started.html |
e7d7396c8e69-0 |
How-To Guides
Contents
Types
Usage
How-To Guides#
Types#
The first set of examples all highlight different types of memory.
ConversationBufferMemory
ConversationBufferWindowMemory
Entity Memory
Conversation Knowledge Graph Memory
ConversationSummaryMemory
ConversationSummaryBufferMemory
ConversationTokenBufferMemory
VectorStore-Backed Memory
Usage#
The examples here all highlight how to use memory in different ways.
How to add Memory to an LLMChain
How to add memory to a Multi-Input Chain
How to add Memory to an Agent
Adding Message Memory backed by a database to an Agent
Cassandra Chat Message History
How to customize conversational memory
How to create a custom Memory class
Dynamodb Chat Message History
Momento
Mongodb Chat Message History
Motörhead Memory
How to use multiple memory classes in the same chain
Postgres Chat Message History
Redis Chat Message History
Zep Memory | https://python.langchain.com/en/latest/modules/memory/how_to_guides.html |
0d995a98868f-0 |
Entity Memory
Contents
Using in a chain
Inspecting the memory store
Entity Memory#
This notebook shows how to work with a memory module that remembers things about specific entities. It extracts information on entities (using LLMs) and builds up its knowledge about those entities over time (also using LLMs).
Let’s first walk through using this functionality.
from langchain.llms import OpenAI
from langchain.memory import ConversationEntityMemory
llm = OpenAI(temperature=0)
memory = ConversationEntityMemory(llm=llm)
_input = {"input": "Deven & Sam are working on a hackathon project"}
memory.load_memory_variables(_input)
memory.save_context(
_input,
{"output": " That sounds like a great project! What kind of project are they working on?"}
)
memory.load_memory_variables({"input": 'who is Sam'})
{'history': 'Human: Deven & Sam are working on a hackathon project\nAI: That sounds like a great project! What kind of project are they working on?',
'entities': {'Sam': 'Sam is working on a hackathon project with Deven.'}}
memory = ConversationEntityMemory(llm=llm, return_messages=True)
_input = {"input": "Deven & Sam are working on a hackathon project"}
memory.load_memory_variables(_input)
memory.save_context(
_input,
{"output": " That sounds like a great project! What kind of project are they working on?"}
)
memory.load_memory_variables({"input": 'who is Sam'})
{'history': [HumanMessage(content='Deven & Sam are working on a hackathon project', additional_kwargs={}),
AIMessage(content=' That sounds like a great project! What kind of project are they working on?', additional_kwargs={})], | https://python.langchain.com/en/latest/modules/memory/types/entity_summary_memory.html |
0d995a98868f-1 | 'entities': {'Sam': 'Sam is working on a hackathon project with Deven.'}}
Using in a chain#
Let’s now use it in a chain!
from langchain.chains import ConversationChain
from langchain.memory import ConversationEntityMemory
from langchain.memory.prompt import ENTITY_MEMORY_CONVERSATION_TEMPLATE
from pydantic import BaseModel
from typing import List, Dict, Any
conversation = ConversationChain(
llm=llm,
verbose=True,
prompt=ENTITY_MEMORY_CONVERSATION_TEMPLATE,
memory=ConversationEntityMemory(llm=llm)
)
conversation.predict(input="Deven & Sam are working on a hackathon project")
> Entering new ConversationChain chain...
Prompt after formatting:
You are an assistant to a human, powered by a large language model trained by OpenAI.
You are designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, you are able to generate human-like text based on the input you receive, allowing you to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
You are constantly learning and improving, and your capabilities are constantly evolving. You are able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. You have access to some personalized information provided by the human in the Context section below. Additionally, you are able to generate your own text based on the input you receive, allowing you to engage in discussions and provide explanations and descriptions on a wide range of topics. | https://python.langchain.com/en/latest/modules/memory/types/entity_summary_memory.html |
0d995a98868f-2 | Overall, you are a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether the human needs help with a specific question or just wants to have a conversation about a particular topic, you are here to assist.
Context:
{'Deven': 'Deven is working on a hackathon project with Sam.', 'Sam': 'Sam is working on a hackathon project with Deven.'}
Current conversation:
Last line:
Human: Deven & Sam are working on a hackathon project
You:
> Finished chain.
' That sounds like a great project! What kind of project are they working on?'
conversation.memory.entity_store.store
{'Deven': 'Deven is working on a hackathon project with Sam, which they are entering into a hackathon.',
'Sam': 'Sam is working on a hackathon project with Deven.'}
conversation.predict(input="They are trying to add more complex memory structures to Langchain")
> Entering new ConversationChain chain...
Prompt after formatting:
You are an assistant to a human, powered by a large language model trained by OpenAI.
You are designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, you are able to generate human-like text based on the input you receive, allowing you to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand. | https://python.langchain.com/en/latest/modules/memory/types/entity_summary_memory.html |
0d995a98868f-3 | You are constantly learning and improving, and your capabilities are constantly evolving. You are able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. You have access to some personalized information provided by the human in the Context section below. Additionally, you are able to generate your own text based on the input you receive, allowing you to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, you are a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether the human needs help with a specific question or just wants to have a conversation about a particular topic, you are here to assist.
Context:
{'Deven': 'Deven is working on a hackathon project with Sam, which they are entering into a hackathon.', 'Sam': 'Sam is working on a hackathon project with Deven.', 'Langchain': ''}
Current conversation:
Human: Deven & Sam are working on a hackathon project
AI: That sounds like a great project! What kind of project are they working on?
Last line:
Human: They are trying to add more complex memory structures to Langchain
You:
> Finished chain.
' That sounds like an interesting project! What kind of memory structures are they trying to add?'
conversation.predict(input="They are adding in a key-value store for entities mentioned so far in the conversation.")
> Entering new ConversationChain chain...
Prompt after formatting:
You are an assistant to a human, powered by a large language model trained by OpenAI. | https://python.langchain.com/en/latest/modules/memory/types/entity_summary_memory.html |
0d995a98868f-4 | You are an assistant to a human, powered by a large language model trained by OpenAI.
You are designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, you are able to generate human-like text based on the input you receive, allowing you to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
You are constantly learning and improving, and your capabilities are constantly evolving. You are able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. You have access to some personalized information provided by the human in the Context section below. Additionally, you are able to generate your own text based on the input you receive, allowing you to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, you are a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether the human needs help with a specific question or just wants to have a conversation about a particular topic, you are here to assist.
Context:
{'Deven': 'Deven is working on a hackathon project with Sam, which they are entering into a hackathon. They are trying to add more complex memory structures to Langchain.', 'Sam': 'Sam is working on a hackathon project with Deven, trying to add more complex memory structures to Langchain.', 'Langchain': 'Langchain is a project that is trying to add more complex memory structures.', 'Key-Value Store': ''}
Current conversation:
Human: Deven & Sam are working on a hackathon project
AI: That sounds like a great project! What kind of project are they working on? | https://python.langchain.com/en/latest/modules/memory/types/entity_summary_memory.html |
0d995a98868f-5 | AI: That sounds like a great project! What kind of project are they working on?
Human: They are trying to add more complex memory structures to Langchain
AI: That sounds like an interesting project! What kind of memory structures are they trying to add?
Last line:
Human: They are adding in a key-value store for entities mentioned so far in the conversation.
You:
> Finished chain.
' That sounds like a great idea! How will the key-value store help with the project?'
conversation.predict(input="What do you know about Deven & Sam?")
> Entering new ConversationChain chain...
Prompt after formatting:
You are an assistant to a human, powered by a large language model trained by OpenAI.
You are designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, you are able to generate human-like text based on the input you receive, allowing you to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
You are constantly learning and improving, and your capabilities are constantly evolving. You are able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. You have access to some personalized information provided by the human in the Context section below. Additionally, you are able to generate your own text based on the input you receive, allowing you to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, you are a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether the human needs help with a specific question or just wants to have a conversation about a particular topic, you are here to assist.
Context: | https://python.langchain.com/en/latest/modules/memory/types/entity_summary_memory.html |
0d995a98868f-6 | Context:
{'Deven': 'Deven is working on a hackathon project with Sam, which they are entering into a hackathon. They are trying to add more complex memory structures to Langchain, including a key-value store for entities mentioned so far in the conversation.', 'Sam': 'Sam is working on a hackathon project with Deven, trying to add more complex memory structures to Langchain, including a key-value store for entities mentioned so far in the conversation.'}
Current conversation:
Human: Deven & Sam are working on a hackathon project
AI: That sounds like a great project! What kind of project are they working on?
Human: They are trying to add more complex memory structures to Langchain
AI: That sounds like an interesting project! What kind of memory structures are they trying to add?
Human: They are adding in a key-value store for entities mentioned so far in the conversation.
AI: That sounds like a great idea! How will the key-value store help with the project?
Last line:
Human: What do you know about Deven & Sam?
You:
> Finished chain.
' Deven and Sam are working on a hackathon project together, trying to add more complex memory structures to Langchain, including a key-value store for entities mentioned so far in the conversation. They seem to be working hard on this project and have a great idea for how the key-value store can help.'
Inspecting the memory store#
We can also inspect the memory store directly. In the following examples, we look at it directly, and then go through some examples of adding information and watching how it changes.
from pprint import pprint
pprint(conversation.memory.entity_store.store)
{'Daimon': 'Daimon is a company founded by Sam, a successful entrepreneur.', | https://python.langchain.com/en/latest/modules/memory/types/entity_summary_memory.html |
0d995a98868f-7 | {'Daimon': 'Daimon is a company founded by Sam, a successful entrepreneur.',
'Deven': 'Deven is working on a hackathon project with Sam, which they are '
'entering into a hackathon. They are trying to add more complex '
'memory structures to Langchain, including a key-value store for '
'entities mentioned so far in the conversation, and seem to be '
'working hard on this project with a great idea for how the '
'key-value store can help.',
'Key-Value Store': 'A key-value store is being added to the project to store '
'entities mentioned in the conversation.',
'Langchain': 'Langchain is a project that is trying to add more complex '
'memory structures, including a key-value store for entities '
'mentioned so far in the conversation.',
'Sam': 'Sam is working on a hackathon project with Deven, trying to add more '
'complex memory structures to Langchain, including a key-value store '
'for entities mentioned so far in the conversation. They seem to have '
'a great idea for how the key-value store can help, and Sam is also '
'the founder of a company called Daimon.'}
conversation.predict(input="Sam is the founder of a company called Daimon.")
> Entering new ConversationChain chain...
Prompt after formatting:
You are an assistant to a human, powered by a large language model trained by OpenAI. | https://python.langchain.com/en/latest/modules/memory/types/entity_summary_memory.html |
0d995a98868f-8 | You are an assistant to a human, powered by a large language model trained by OpenAI.
You are designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, you are able to generate human-like text based on the input you receive, allowing you to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
You are constantly learning and improving, and your capabilities are constantly evolving. You are able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. You have access to some personalized information provided by the human in the Context section below. Additionally, you are able to generate your own text based on the input you receive, allowing you to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, you are a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether the human needs help with a specific question or just wants to have a conversation about a particular topic, you are here to assist.
Context:
{'Daimon': 'Daimon is a company founded by Sam, a successful entrepreneur.', 'Sam': 'Sam is working on a hackathon project with Deven, trying to add more complex memory structures to Langchain, including a key-value store for entities mentioned so far in the conversation. They seem to have a great idea for how the key-value store can help, and Sam is also the founder of a company called Daimon.'}
Current conversation:
Human: They are adding in a key-value store for entities mentioned so far in the conversation.
AI: That sounds like a great idea! How will the key-value store help with the project? | https://python.langchain.com/en/latest/modules/memory/types/entity_summary_memory.html |
0d995a98868f-9 | Human: What do you know about Deven & Sam?
AI: Deven and Sam are working on a hackathon project together, trying to add more complex memory structures to Langchain, including a key-value store for entities mentioned so far in the conversation. They seem to be working hard on this project and have a great idea for how the key-value store can help.
Human: Sam is the founder of a company called Daimon.
AI:
That's impressive! It sounds like Sam is a very successful entrepreneur. What kind of company is Daimon?
Last line:
Human: Sam is the founder of a company called Daimon.
You:
> Finished chain.
" That's impressive! It sounds like Sam is a very successful entrepreneur. What kind of company is Daimon?"
from pprint import pprint
pprint(conversation.memory.entity_store.store)
{'Daimon': 'Daimon is a company founded by Sam, a successful entrepreneur, who '
'is working on a hackathon project with Deven to add more complex '
'memory structures to Langchain.',
'Deven': 'Deven is working on a hackathon project with Sam, which they are '
'entering into a hackathon. They are trying to add more complex '
'memory structures to Langchain, including a key-value store for '
'entities mentioned so far in the conversation, and seem to be '
'working hard on this project with a great idea for how the '
'key-value store can help.',
'Key-Value Store': 'A key-value store is being added to the project to store '
'entities mentioned in the conversation.',
'Langchain': 'Langchain is a project that is trying to add more complex '
'memory structures, including a key-value store for entities ' | https://python.langchain.com/en/latest/modules/memory/types/entity_summary_memory.html |
0d995a98868f-10 | 'memory structures, including a key-value store for entities '
'mentioned so far in the conversation.',
'Sam': 'Sam is working on a hackathon project with Deven, trying to add more '
'complex memory structures to Langchain, including a key-value store '
'for entities mentioned so far in the conversation. They seem to have '
'a great idea for how the key-value store can help, and Sam is also '
'the founder of a successful company called Daimon.'}
conversation.predict(input="What do you know about Sam?")
> Entering new ConversationChain chain...
Prompt after formatting:
You are an assistant to a human, powered by a large language model trained by OpenAI.
You are designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, you are able to generate human-like text based on the input you receive, allowing you to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
You are constantly learning and improving, and your capabilities are constantly evolving. You are able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. You have access to some personalized information provided by the human in the Context section below. Additionally, you are able to generate your own text based on the input you receive, allowing you to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, you are a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether the human needs help with a specific question or just wants to have a conversation about a particular topic, you are here to assist.
Context: | https://python.langchain.com/en/latest/modules/memory/types/entity_summary_memory.html |
0d995a98868f-11 | Context:
{'Deven': 'Deven is working on a hackathon project with Sam, which they are entering into a hackathon. They are trying to add more complex memory structures to Langchain, including a key-value store for entities mentioned so far in the conversation, and seem to be working hard on this project with a great idea for how the key-value store can help.', 'Sam': 'Sam is working on a hackathon project with Deven, trying to add more complex memory structures to Langchain, including a key-value store for entities mentioned so far in the conversation. They seem to have a great idea for how the key-value store can help, and Sam is also the founder of a successful company called Daimon.', 'Langchain': 'Langchain is a project that is trying to add more complex memory structures, including a key-value store for entities mentioned so far in the conversation.', 'Daimon': 'Daimon is a company founded by Sam, a successful entrepreneur, who is working on a hackathon project with Deven to add more complex memory structures to Langchain.'}
Current conversation:
Human: What do you know about Deven & Sam?
AI: Deven and Sam are working on a hackathon project together, trying to add more complex memory structures to Langchain, including a key-value store for entities mentioned so far in the conversation. They seem to be working hard on this project and have a great idea for how the key-value store can help.
Human: Sam is the founder of a company called Daimon.
AI:
That's impressive! It sounds like Sam is a very successful entrepreneur. What kind of company is Daimon?
Human: Sam is the founder of a company called Daimon.
AI: That's impressive! It sounds like Sam is a very successful entrepreneur. What kind of company is Daimon?
Last line: | https://python.langchain.com/en/latest/modules/memory/types/entity_summary_memory.html |
0d995a98868f-12 | Last line:
Human: What do you know about Sam?
You:
> Finished chain.
' Sam is the founder of a successful company called Daimon. He is also working on a hackathon project with Deven to add more complex memory structures to Langchain. They seem to have a great idea for how the key-value store can help.' | https://python.langchain.com/en/latest/modules/memory/types/entity_summary_memory.html |
67d0748207ed-0 |
Conversation Knowledge Graph Memory
Contents
Using in a chain
Conversation Knowledge Graph Memory#
This type of memory uses a knowledge graph to recreate memory.
Let’s first walk through how to use the utilities
from langchain.memory import ConversationKGMemory
from langchain.llms import OpenAI
llm = OpenAI(temperature=0)
memory = ConversationKGMemory(llm=llm)
memory.save_context({"input": "say hi to sam"}, {"output": "who is sam"})
memory.save_context({"input": "sam is a friend"}, {"output": "okay"})
memory.load_memory_variables({"input": 'who is sam'})
{'history': 'On Sam: Sam is friend.'}
We can also get the history as a list of messages (this is useful if you are using this with a chat model).
memory = ConversationKGMemory(llm=llm, return_messages=True)
memory.save_context({"input": "say hi to sam"}, {"output": "who is sam"})
memory.save_context({"input": "sam is a friend"}, {"output": "okay"})
memory.load_memory_variables({"input": 'who is sam'})
{'history': [SystemMessage(content='On Sam: Sam is friend.', additional_kwargs={})]}
We can also more modularly get current entities from a new message (this will use previous messages as context).
memory.get_current_entities("what's Sams favorite color?")
['Sam']
We can also more modularly get knowledge triplets from a new message (this will use previous messages as context).
memory.get_knowledge_triplets("her favorite color is red")
[KnowledgeTriple(subject='Sam', predicate='favorite color', object_='red')]
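The knowledge graph accumulated so far can also be inspected directly. A minimal sketch, assuming the memory exposes its NetworkX-backed graph on the kg attribute with a get_triples() helper (an assumption about the current API, not shown in the original walkthrough):
# List the knowledge triples currently stored in the underlying graph
memory.kg.get_triples()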
Using in a chain#
Let’s now use this in a chain!
llm = OpenAI(temperature=0)
from langchain.prompts.prompt import PromptTemplate
from langchain.chains import ConversationChain
template = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context.
If the AI does not know the answer to a question, it truthfully says it does not know. The AI ONLY uses information contained in the "Relevant Information" section and does not hallucinate.
Relevant Information:
{history}
Conversation:
Human: {input}
AI:"""
prompt = PromptTemplate(
input_variables=["history", "input"], template=template
)
conversation_with_kg = ConversationChain(
llm=llm,
verbose=True,
prompt=prompt,
memory=ConversationKGMemory(llm=llm)
)
conversation_with_kg.predict(input="Hi, what's up?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context.
If the AI does not know the answer to a question, it truthfully says it does not know. The AI ONLY uses information contained in the "Relevant Information" section and does not hallucinate.
Relevant Information:
Conversation:
Human: Hi, what's up?
AI:
> Finished chain.
" Hi there! I'm doing great. I'm currently in the process of learning about the world around me. I'm learning about different cultures, languages, and customs. It's really fascinating! How about you?"
conversation_with_kg.predict(input="My name is James and I'm helping Will. He's an engineer.")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context.
If the AI does not know the answer to a question, it truthfully says it does not know. The AI ONLY uses information contained in the "Relevant Information" section and does not hallucinate.
Relevant Information:
Conversation:
Human: My name is James and I'm helping Will. He's an engineer.
AI:
> Finished chain.
" Hi James, it's nice to meet you. I'm an AI and I understand you're helping Will, the engineer. What kind of engineering does he do?"
conversation_with_kg.predict(input="What do you know about Will?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context.
If the AI does not know the answer to a question, it truthfully says it does not know. The AI ONLY uses information contained in the "Relevant Information" section and does not hallucinate.
Relevant Information:
On Will: Will is an engineer.
Conversation:
Human: What do you know about Will?
AI:
> Finished chain.
' Will is an engineer.'
ConversationTokenBufferMemory#
ConversationTokenBufferMemory keeps a buffer of recent interactions in memory, and uses token length rather than number of interactions to determine when to flush interactions.
Let’s first walk through how to use the utilities
from langchain.memory import ConversationTokenBufferMemory
from langchain.llms import OpenAI
llm = OpenAI()
memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=10)
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.save_context({"input": "not much you"}, {"output": "not much"})
memory.load_memory_variables({})
{'history': 'Human: not much you\nAI: not much'}
We can also get the history as a list of messages (this is useful if you are using this with a chat model).
memory = ConversationTokenBufferMemory(llm=llm, max_token_limit=10, return_messages=True)
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.save_context({"input": "not much you"}, {"output": "not much"})
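As with the string version, loading the variables returns whatever still fits under the token limit, but now as message objects. A minimal sketch:
memory.load_memory_variables({})
# With return_messages=True the history comes back as HumanMessage/AIMessage objects
# rather than a single formatted string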
Using in a chain#
Let’s walk through an example, again setting verbose=True so we can see the prompt.
from langchain.chains import ConversationChain
conversation_with_summary = ConversationChain(
llm=llm,
# We set a very low max_token_limit for the purposes of testing.
memory=ConversationTokenBufferMemory(llm=OpenAI(), max_token_limit=60),
verbose=True
)
conversation_with_summary.predict(input="Hi, what's up?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi, what's up?
AI:
> Finished chain.
" Hi there! I'm doing great, just enjoying the day. How about you?"
conversation_with_summary.predict(input="Just working on writing some documentation!")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi, what's up?
AI: Hi there! I'm doing great, just enjoying the day. How about you?
Human: Just working on writing some documentation!
AI:
> Finished chain.
' Sounds like a productive day! What kind of documentation are you writing?'
conversation_with_summary.predict(input="For LangChain! Have you heard of it?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi, what's up?
AI: Hi there! I'm doing great, just enjoying the day. How about you?
Human: Just working on writing some documentation!
AI: Sounds like a productive day! What kind of documentation are you writing?
Human: For LangChain! Have you heard of it?
AI:
> Finished chain.
" Yes, I have heard of LangChain! It is a decentralized language-learning platform that connects native speakers and learners in real time. Is that the documentation you're writing about?"
# We can see here that the buffer is updated
conversation_with_summary.predict(input="Haha nope, although a lot of people confuse it for that")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: For LangChain! Have you heard of it?
AI: Yes, I have heard of LangChain! It is a decentralized language-learning platform that connects native speakers and learners in real time. Is that the documentation you're writing about?
Human: Haha nope, although a lot of people confuse it for that
AI:
> Finished chain.
" Oh, I see. Is there another language learning platform you're referring to?"
ConversationBufferMemory#
This notebook shows how to use ConversationBufferMemory. This memory allows for storing messages and then extracting them into a variable.
We can first extract it as a string.
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory()
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.load_memory_variables({})
{'history': 'Human: hi\nAI: whats up'}
We can also get the history as a list of messages (this is useful if you are using this with a chat model).
memory = ConversationBufferMemory(return_messages=True)
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.load_memory_variables({})
{'history': [HumanMessage(content='hi', additional_kwargs={}),
AIMessage(content='whats up', additional_kwargs={})]}
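The underlying chat_memory can also be written to directly, which is handy for importing an existing transcript. A minimal sketch using the standard ChatMessageHistory helpers:
memory = ConversationBufferMemory(return_messages=True)
memory.chat_memory.add_user_message("hi")
memory.chat_memory.add_ai_message("whats up")
# Equivalent to the save_context calls above
memory.load_memory_variables({})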
Using in a chain#
Finally, let’s take a look at using this in a chain (setting verbose=True so we can see the prompt).
from langchain.llms import OpenAI
from langchain.chains import ConversationChain
llm = OpenAI(temperature=0)
conversation = ConversationChain(
llm=llm,
verbose=True,
memory=ConversationBufferMemory()
)
conversation.predict(input="Hi there!")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi there!
AI:
> Finished chain.
" Hi there! It's nice to meet you. How can I help you today?"
conversation.predict(input="I'm doing well! Just having a conversation with an AI.")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi there!
AI: Hi there! It's nice to meet you. How can I help you today?
Human: I'm doing well! Just having a conversation with an AI.
AI:
> Finished chain.
" That's great! It's always nice to have a conversation with someone new. What would you like to talk about?"
conversation.predict(input="Tell me about yourself.")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi there!
AI: Hi there! It's nice to meet you. How can I help you today?
Human: I'm doing well! Just having a conversation with an AI.
AI: That's great! It's always nice to have a conversation with someone new. What would you like to talk about?
Human: Tell me about yourself.
AI:
> Finished chain.
" Sure! I'm an AI created to help people with their everyday tasks. I'm programmed to understand natural language and provide helpful information. I'm also constantly learning and updating my knowledge base so I can provide more accurate and helpful answers."
And that's it for getting started! There are plenty of different types of memory; check out our examples to see them all.
ConversationSummaryMemory#
Now let's take a look at using a slightly more complex type of memory - ConversationSummaryMemory. This type of memory creates a summary of the conversation as it progresses, which is useful for condensing information from longer conversations.
Let’s first explore the basic functionality of this type of memory.
from langchain.memory import ConversationSummaryMemory, ChatMessageHistory
from langchain.llms import OpenAI
memory = ConversationSummaryMemory(llm=OpenAI(temperature=0))
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.load_memory_variables({})
{'history': '\nThe human greets the AI, to which the AI responds.'}
We can also get the history as a list of messages (this is useful if you are using this with a chat model).
memory = ConversationSummaryMemory(llm=OpenAI(temperature=0), return_messages=True)
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.load_memory_variables({})
{'history': [SystemMessage(content='\nThe human greets the AI, to which the AI responds.', additional_kwargs={})]}
We can also utilize the predict_new_summary method directly.
messages = memory.chat_memory.messages
previous_summary = ""
memory.predict_new_summary(messages, previous_summary)
'\nThe human greets the AI, to which the AI responds.'
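Each call to save_context also rolls the new exchange into the running summary, which is kept on the buffer attribute. A minimal sketch, reusing the memory object from above:
memory.save_context({"input": "I'm writing some documentation"}, {"output": "sounds useful"})
# The running summary now also covers the documentation exchange
memory.buffer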
Initializing with messages#
If you have messages outside this class, you can easily initialize the class with ChatMessageHistory. During loading, a summary will be calculated.
history = ChatMessageHistory()
history.add_user_message("hi")
history.add_ai_message("hi there!")
memory = ConversationSummaryMemory.from_messages(llm=OpenAI(temperature=0), chat_memory=history, return_messages=True)
memory.buffer
'\nThe human greets the AI, to which the AI responds with a friendly greeting.'
Using in a chain#
Let’s walk through an example of using this in a chain, again setting verbose=True so we can see the prompt.
from langchain.llms import OpenAI
from langchain.chains import ConversationChain
llm = OpenAI(temperature=0)
conversation_with_summary = ConversationChain(
llm=llm,
memory=ConversationSummaryMemory(llm=OpenAI()),
verbose=True
)
conversation_with_summary.predict(input="Hi, what's up?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi, what's up?
AI:
> Finished chain.
" Hi there! I'm doing great. I'm currently helping a customer with a technical issue. How about you?"
conversation_with_summary.predict(input="Tell me more about it!")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
The human greeted the AI and asked how it was doing. The AI replied that it was doing great and was currently helping a customer with a technical issue.
Human: Tell me more about it!
AI:
> Finished chain.
" Sure! The customer is having trouble with their computer not connecting to the internet. I'm helping them troubleshoot the issue and figure out what the problem is. So far, we've tried resetting the router and checking the network settings, but the issue still persists. We're currently looking into other possible solutions."
conversation_with_summary.predict(input="Very cool -- what is the scope of the project?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
The human greeted the AI and asked how it was doing. The AI replied that it was doing great and was currently helping a customer with a technical issue where their computer was not connecting to the internet. The AI was troubleshooting the issue and had already tried resetting the router and checking the network settings, but the issue still persisted and they were looking into other possible solutions.
Human: Very cool -- what is the scope of the project?
AI:
> Finished chain.
" The scope of the project is to troubleshoot the customer's computer issue and find a solution that will allow them to connect to the internet. We are currently exploring different possibilities and have already tried resetting the router and checking the network settings, but the issue still persists."
ConversationBufferWindowMemory#
ConversationBufferWindowMemory keeps a list of the interactions of the conversation over time. It only uses the last K interactions. This can be useful for keeping a sliding window of the most recent interactions, so the buffer does not get too large.
Let’s first explore the basic functionality of this type of memory.
from langchain.memory import ConversationBufferWindowMemory
memory = ConversationBufferWindowMemory(k=1)
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.save_context({"input": "not much you"}, {"output": "not much"})
memory.load_memory_variables({})
{'history': 'Human: not much you\nAI: not much'}
We can also get the history as a list of messages (this is useful if you are using this with a chat model).
memory = ConversationBufferWindowMemory(k=1, return_messages=True)
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.save_context({"input": "not much you"}, {"output": "not much"})
memory.load_memory_variables({})
{'history': [HumanMessage(content='not much you', additional_kwargs={}),
AIMessage(content='not much', additional_kwargs={})]}
Using in a chain#
Let’s walk through an example, again setting verbose=True so we can see the prompt.
from langchain.llms import OpenAI
from langchain.chains import ConversationChain
conversation_with_summary = ConversationChain(
llm=OpenAI(temperature=0),
# We set a low k=2, to only keep the last 2 interactions in memory
memory=ConversationBufferWindowMemory(k=2),
verbose=True
)
conversation_with_summary.predict(input="Hi, what's up?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi, what's up?
AI:
> Finished chain.
" Hi there! I'm doing great. I'm currently helping a customer with a technical issue. How about you?"
conversation_with_summary.predict(input="What's their issues?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi, what's up?
AI: Hi there! I'm doing great. I'm currently helping a customer with a technical issue. How about you?
Human: What's their issues?
AI:
> Finished chain.
" The customer is having trouble connecting to their Wi-Fi network. I'm helping them troubleshoot the issue and get them connected."
conversation_with_summary.predict(input="Is it going well?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi, what's up?
AI: Hi there! I'm doing great. I'm currently helping a customer with a technical issue. How about you?
Human: What's their issues?
AI: The customer is having trouble connecting to their Wi-Fi network. I'm helping them troubleshoot the issue and get them connected.
Human: Is it going well?
AI:
> Finished chain.
" Yes, it's going well so far. We've already identified the problem and are now working on a solution."
# Notice here that the first interaction does not appear.
conversation_with_summary.predict(input="What's the solution?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: What's their issues?
AI: The customer is having trouble connecting to their Wi-Fi network. I'm helping them troubleshoot the issue and get them connected.
Human: Is it going well?
AI: Yes, it's going well so far. We've already identified the problem and are now working on a solution.
Human: What's the solution?
AI:
> Finished chain.
" The solution is to reset the router and reconfigure the settings. We're currently in the process of doing that."
ConversationSummaryBufferMemory#
ConversationSummaryBufferMemory combines the last two ideas. It keeps a buffer of recent interactions in memory, but rather than just completely flushing old interactions it compiles them into a summary and uses both. Unlike the previous implementation though, it uses token length rather than number of interactions to determine when to flush interactions.
Let’s first walk through how to use the utilities
from langchain.memory import ConversationSummaryBufferMemory
from langchain.llms import OpenAI
llm = OpenAI()
memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=10)
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.save_context({"input": "not much you"}, {"output": "not much"})
memory.load_memory_variables({})
{'history': 'System: \nThe human says "hi", and the AI responds with "whats up".\nHuman: not much you\nAI: not much'}
We can also get the history as a list of messages (this is useful if you are using this with a chat model).
memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=10, return_messages=True)
memory.save_context({"input": "hi"}, {"output": "whats up"})
memory.save_context({"input": "not much you"}, {"output": "not much"})
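Loading the variables now returns message objects; if older turns have already been summarized, the summary is prepended as a system message. A minimal sketch:
memory.load_memory_variables({})
# The summarized portion (if any) comes back as a SystemMessage, followed by the
# recent Human/AI messages that still fit under max_token_limit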
We can also utilize the predict_new_summary method directly.
messages = memory.chat_memory.messages
previous_summary = ""
memory.predict_new_summary(messages, previous_summary)
'\nThe human and AI state that they are not doing much.'
Using in a chain#
Let’s walk through an example, again setting verbose=True so we can see the prompt.
from langchain.chains import ConversationChain
conversation_with_summary = ConversationChain(
llm=llm,
# We set a very low max_token_limit for the purposes of testing.
memory=ConversationSummaryBufferMemory(llm=OpenAI(), max_token_limit=40),
verbose=True
)
conversation_with_summary.predict(input="Hi, what's up?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi, what's up?
AI:
> Finished chain.
" Hi there! I'm doing great. I'm learning about the latest advances in artificial intelligence. What about you?"
conversation_with_summary.predict(input="Just working on writing some documentation!")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi, what's up?
AI: Hi there! I'm doing great. I'm spending some time learning about the latest developments in AI technology. How about you?
Human: Just working on writing some documentation!
AI:
> Finished chain.
' That sounds like a great use of your time. Do you have experience with writing documentation?'
# We can see here that there is a summary of the conversation and then some previous interactions
conversation_with_summary.predict(input="For LangChain! Have you heard of it?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
System:
The human asked the AI what it was up to and the AI responded that it was learning about the latest developments in AI technology.
Human: Just working on writing some documentation!
AI: That sounds like a great use of your time. Do you have experience with writing documentation?
Human: For LangChain! Have you heard of it?
AI:
> Finished chain.
" No, I haven't heard of LangChain. Can you tell me more about it?"
# We can see here that the summary and the buffer are updated
conversation_with_summary.predict(input="Haha nope, although a lot of people confuse it for that")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
System:
The human asked the AI what it was up to and the AI responded that it was learning about the latest developments in AI technology. The human then mentioned they were writing documentation, to which the AI responded that it sounded like a great use of their time and asked if they had experience with writing documentation.
Human: For LangChain! Have you heard of it?
AI: No, I haven't heard of LangChain. Can you tell me more about it?
Human: Haha nope, although a lot of people confuse it for that
AI:
> Finished chain.
' Oh, okay. What is LangChain?'
VectorStore-Backed Memory#
VectorStoreRetrieverMemory stores memories in a VectorDB and queries the top-K most “salient” docs every time it is called.
This differs from most of the other Memory classes in that it doesn’t explicitly track the order of interactions.
In this case, the “docs” are previous conversation snippets. This can be useful to refer to relevant pieces of information that the AI was told earlier in the conversation.
from datetime import datetime
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.memory import VectorStoreRetrieverMemory
from langchain.chains import ConversationChain
from langchain.prompts import PromptTemplate
Initialize your VectorStore#
Depending on the store you choose, this step may look different. Consult the relevant VectorStore documentation for more details.
import faiss
from langchain.docstore import InMemoryDocstore
from langchain.vectorstores import FAISS
embedding_size = 1536 # Dimensions of the OpenAIEmbeddings
index = faiss.IndexFlatL2(embedding_size)
embedding_fn = OpenAIEmbeddings().embed_query
vectorstore = FAISS(embedding_fn, index, InMemoryDocstore({}), {})
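Any other vector store can be substituted here. For example, a rough sketch with Chroma instead of FAISS (assuming the chromadb package is installed; the variable name is illustrative and not used in the rest of this walkthrough):
from langchain.vectorstores import Chroma
# An alternative store exposing the same as_retriever() interface
chroma_vectorstore = Chroma(embedding_function=OpenAIEmbeddings())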
Create the VectorStoreRetrieverMemory#
The memory object is instantiated from any VectorStoreRetriever.
# In actual usage, you would set `k` to be a higher value, but we use k=1 to show that
# the vector lookup still returns the semantically relevant information
retriever = vectorstore.as_retriever(search_kwargs=dict(k=1))
memory = VectorStoreRetrieverMemory(retriever=retriever)
# When added to an agent, the memory object can save pertinent information from conversations or from tools the agent used
memory.save_context({"input": "My favorite food is pizza"}, {"output": "thats good to know"})
memory.save_context({"input": "My favorite sport is soccer"}, {"output": "..."})
memory.save_context({"input": "I don't the Celtics"}, {"output": "ok"}) #
# Notice the first (and, since k=1, only) result returned is the memory about the favorite sport,
# which the language model deems the most semantically relevant to the question about what to watch.
print(memory.load_memory_variables({"prompt": "what sport should i watch?"})["history"])
input: My favorite sport is soccer
output: ...
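The datetime import above hints at a related pattern: prefixing saved snippets with a timestamp so later queries can weigh how fresh a memory is. A purely illustrative sketch (the timestamp format is an assumption, not part of the API):
now = datetime.now().isoformat()
memory.save_context(
    {"input": f"[{now}] I'm planning to watch a game tonight"},
    {"output": "enjoy the game"},
)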
Using in a chain#
Let’s walk through an example, again setting verbose=True so we can see the prompt.
llm = OpenAI(temperature=0) # Can be any valid LLM
_DEFAULT_TEMPLATE = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Relevant pieces of previous conversation:
{history}
(You do not need to use these pieces of information if not relevant)
Current conversation:
Human: {input}
AI:"""
PROMPT = PromptTemplate(
input_variables=["history", "input"], template=_DEFAULT_TEMPLATE
)
conversation_with_summary = ConversationChain(
llm=llm,
prompt=PROMPT,
# We pass in the vector-store-backed memory object created above.
memory=memory,
verbose=True
)
conversation_with_summary.predict(input="Hi, my name is Perry, what's up?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Relevant pieces of previous conversation:
input: My favorite food is pizza
output: thats good to know
(You do not need to use these pieces of information if not relevant)
Current conversation:
Human: Hi, my name is Perry, what's up?
AI:
> Finished chain.
" Hi Perry, I'm doing well. How about you?"
# Here, the soccer-related content is surfaced
conversation_with_summary.predict(input="what's my favorite sport?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Relevant pieces of previous conversation:
input: My favorite sport is soccer
output: ...
(You do not need to use these pieces of information if not relevant)
Current conversation:
Human: what's my favorite sport?
AI:
> Finished chain.
' You told me earlier that your favorite sport is soccer.'
# Even though the language model is stateless, since the relevant memory is fetched, it can "reason" about it.
# Timestamping memories and data is useful in general to let the agent determine temporal relevance.
conversation_with_summary.predict(input="Whats my favorite food")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Relevant pieces of previous conversation:
input: My favorite food is pizza
output: thats good to know
(You do not need to use these pieces of information if not relevant)
Current conversation:
Human: Whats my favorite food
AI:
> Finished chain.
' You said your favorite food is pizza.'
# The memories from the conversation are automatically stored,
# since this query best matches the introduction chat above,
# the agent is able to 'remember' the user's name.
conversation_with_summary.predict(input="What's my name?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Relevant pieces of previous conversation:
input: Hi, my name is Perry, what's up?
response: Hi Perry, I'm doing well. How about you?
(You do not need to use these pieces of information if not relevant)
Current conversation:
Human: What's my name?
AI:
> Finished chain.
' Your name is Perry.'
Motörhead Memory#
Motörhead is a memory server implemented in Rust. It automatically handles incremental summarization in the background and allows for stateless applications.
Setup#
See instructions at Motörhead for running the server locally.
from langchain.memory.motorhead_memory import MotorheadMemory
from langchain import OpenAI, LLMChain, PromptTemplate
template = """You are a chatbot having a conversation with a human.
{chat_history}
Human: {human_input}
AI:"""
prompt = PromptTemplate(
input_variables=["chat_history", "human_input"],
template=template
)
memory = MotorheadMemory(
session_id="testing-1",
url="http://localhost:8080",
memory_key="chat_history"
)
await memory.init(); # loads previous state from Motörhead 🤘
llm_chain = LLMChain(
llm=OpenAI(),
prompt=prompt,
verbose=True,
memory=memory,
)
llm_chain.run("hi im bob")
> Entering new LLMChain chain...
Prompt after formatting:
You are a chatbot having a conversation with a human.
Human: hi im bob
AI:
> Finished chain.
' Hi Bob, nice to meet you! How are you doing today?'
llm_chain.run("whats my name?")
> Entering new LLMChain chain...
Prompt after formatting:
You are a chatbot having a conversation with a human.
Human: hi im bob
AI: Hi Bob, nice to meet you! How are you doing today?
Human: whats my name?
AI:
> Finished chain.
' You said your name is Bob. Is that correct?'
llm_chain.run("whats for dinner?")
> Entering new LLMChain chain...
Prompt after formatting:
You are a chatbot having a conversation with a human.
Human: hi im bob
AI: Hi Bob, nice to meet you! How are you doing today?
Human: whats my name?
AI: You said your name is Bob. Is that correct?
Human: whats for dinner?
AI:
> Finished chain.
" I'm sorry, I'm not sure what you're asking. Could you please rephrase your question?"
Adding Message Memory backed by a database to an Agent#
This notebook goes over adding memory to an Agent where the memory uses an external message store. Before going through this notebook, please walk through the following notebooks, as this one builds on top of them:
Adding memory to an LLM Chain
Custom Agents
Agent with Memory
In order to add a memory with an external message store to an agent we are going to do the following steps:
We are going to create a RedisChatMessageHistory to connect to an external database to store the messages in.
We are going to create an LLMChain using that chat history as memory.
We are going to use that LLMChain to create a custom Agent.
For the purposes of this exercise, we are going to create a simple custom Agent that has access to a search tool and utilizes the ConversationBufferMemory class.
from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_memory import ChatMessageHistory
from langchain.memory.chat_message_histories import RedisChatMessageHistory
from langchain import OpenAI, LLMChain
from langchain.utilities import GoogleSearchAPIWrapper
search = GoogleSearchAPIWrapper()
tools = [
Tool(
name = "Search",
func=search.run,
description="useful for when you need to answer questions about current events"
)
]
Notice the usage of the chat_history variable in the PromptTemplate, which matches up with the dynamic key name in the ConversationBufferMemory.
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"
{chat_history}
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"]
)
Now we can create the ChatMessageHistory backed by the database.
message_history = RedisChatMessageHistory(url='redis://localhost:6379/0', ttl=600, session_id='my-session')
memory = ConversationBufferMemory(memory_key="chat_history", chat_memory=message_history)
We can now construct the LLMChain, with the Memory object, and then create the agent.
llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
agent_chain = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, memory=memory)
agent_chain.run(input="How many people live in canada?")
> Entering new AgentExecutor chain...
Thought: I need to find out the population of Canada
Action: Search
Action Input: Population of Canada
Observation: The current population of Canada is 38,566,192 as of Saturday, December 31, 2022, based on Worldometer elaboration of the latest United Nations data. · Canada ... Additional information related to Canadian population trends can be found on Statistics Canada's Population and Demography Portal. Population of Canada (real- ... Index to the latest information from the Census of Population. This survey conducted by Statistics Canada provides a statistical portrait of Canada and its ... 14 records ... Estimated number of persons by quarter of a year and by year, Canada, provinces and territories. The 2021 Canadian census counted a total population of 36,991,981, an increase of around 5.2 percent over the 2016 figure. ... Between 1990 and 2008, the ... ( 2 ) Census reports and other statistical publications from national statistical offices, ( 3 ) Eurostat: Demographic Statistics, ( 4 ) United Nations ... Canada is a country in North America. Its ten provinces and three territories extend from ... Population. • Q4 2022 estimate. 39,292,355 (37th). Information is available for the total Indigenous population and each of the three ... The term 'Aboriginal' or 'Indigenous' used on the Statistics Canada ... Jun 14, 2022 ... Determinants of health are the broad range of personal, social, economic and environmental factors that determine individual and population ... COVID-19 vaccination coverage across Canada by demographics and key populations. Updated every Friday at 12:00 PM Eastern Time.
Thought: I now know the final answer
Final Answer: The current population of Canada is 38,566,192 as of Saturday, December 31, 2022, based on Worldometer elaboration of the latest United Nations data.
> Finished AgentExecutor chain.
'The current population of Canada is 38,566,192 as of Saturday, December 31, 2022, based on Worldometer elaboration of the latest United Nations data.'
To test the memory of this agent, we can ask a followup question that relies on information in the previous exchange to be answered correctly.
agent_chain.run(input="what is their national anthem called?")
> Entering new AgentExecutor chain...
Thought: I need to find out what the national anthem of Canada is called.
Action: Search
Action Input: National Anthem of Canada
Observation: Jun 7, 2010 ... https://twitter.com/CanadaImmigrantCanadian National Anthem O Canada in HQ - complete with lyrics, captions, vocals & music.LYRICS:O Canada! Nov 23, 2022 ... After 100 years of tradition, O Canada was proclaimed Canada's national anthem in 1980. The music for O Canada was composed in 1880 by Calixa ... O Canada, national anthem of Canada. It was proclaimed the official national anthem on July 1, 1980. “God Save the Queen” remains the royal anthem of Canada ... O Canada! Our home and native land! True patriot love in all of us command. Car ton bras sait porter l'épée,. Il sait porter la croix! "O Canada" (French: Ô Canada) is the national anthem of Canada. The song was originally commissioned by Lieutenant Governor of Quebec Théodore Robitaille ... Feb 1, 2018 ... It was a simple tweak — just two words. But with that, Canada just voted to make its national anthem, “O Canada,” gender neutral, ... "O Canada" was proclaimed Canada's national anthem on July 1,. 1980, 100 years after it was first sung on June 24, 1880. The music. Patriotic music in Canada dates back over 200 years as a distinct category from British or French patriotism, preceding the first legal steps to ... Feb 4, 2022 ... English version: O Canada! Our home and native land! True patriot love in all of us command. With glowing hearts we ... Feb 1, 2018 ... Canada's Senate has passed a bill making the country's national anthem gender-neutral. If you're not familiar with the words to “O Canada,” ... | https://python.langchain.com/en/latest/modules/memory/examples/agent_with_memory_in_db.html |
Thought: I now know the final answer.
Final Answer: The national anthem of Canada is called "O Canada".
> Finished AgentExecutor chain.
'The national anthem of Canada is called "O Canada".'
We can see that the agent remembered that the previous question was about Canada, and properly asked Google Search what the name of Canada’s national anthem was.
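Since the history is persisted in Redis, it can also be read back outside the agent to confirm what was stored for the session. A minimal sketch:
# The Human/AI messages saved under session_id 'my-session'
message_history.messages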
For fun, let’s compare this to an agent that does NOT have memory.
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "agent_scratchpad"]
)
llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
agent_without_memory = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
agent_without_memory.run("How many people live in canada?")
> Entering new AgentExecutor chain...
Thought: I need to find out the population of Canada
Action: Search
Action Input: Population of Canada
Observation: The current population of Canada is 38,566,192 as of Saturday, December 31, 2022, based on Worldometer elaboration of the latest United Nations data. · Canada ... Additional information related to Canadian population trends can be found on Statistics Canada's Population and Demography Portal. Population of Canada (real- ... Index to the latest information from the Census of Population. This survey conducted by Statistics Canada provides a statistical portrait of Canada and its ... 14 records ... Estimated number of persons by quarter of a year and by year, Canada, provinces and territories. The 2021 Canadian census counted a total population of 36,991,981, an increase of around 5.2 percent over the 2016 figure. ... Between 1990 and 2008, the ... ( 2 ) Census reports and other statistical publications from national statistical offices, ( 3 ) Eurostat: Demographic Statistics, ( 4 ) United Nations ... Canada is a country in North America. Its ten provinces and three territories extend from ... Population. • Q4 2022 estimate. 39,292,355 (37th). Information is available for the total Indigenous population and each of the three ... The term 'Aboriginal' or 'Indigenous' used on the Statistics Canada ... Jun 14, 2022 ... Determinants of health are the broad range of personal, social, economic and environmental factors that determine individual and population ... COVID-19 vaccination coverage across Canada by demographics and key populations. Updated every Friday at 12:00 PM Eastern Time.
Thought: I now know the final answer
Final Answer: The current population of Canada is 38,566,192 as of Saturday, December 31, 2022, based on Worldometer elaboration of the latest United Nations data.
> Finished AgentExecutor chain.
'The current population of Canada is 38,566,192 as of Saturday, December 31, 2022, based on Worldometer elaboration of the latest United Nations data.'
agent_without_memory.run("what is their national anthem called?")
> Entering new AgentExecutor chain...
Thought: I should look up the answer
Action: Search
Action Input: national anthem of [country]
Observation: Most nation states have an anthem, defined as "a song, as of praise, devotion, or patriotism"; most anthems are either marches or hymns in style. List of all countries around the world with its national anthem. ... Title and lyrics in the language of the country and translated into English, Aug 1, 2021 ... 1. Afghanistan, "Milli Surood" (National Anthem) · 2. Armenia, "Mer Hayrenik" (Our Fatherland) · 3. Azerbaijan (a transcontinental country with ... A national anthem is a patriotic musical composition symbolizing and evoking eulogies of the history and traditions of a country or nation. National Anthem of Every Country ; Fiji, “Meda Dau Doka” (“God Bless Fiji”) ; Finland, “Maamme”. (“Our Land”) ; France, “La Marseillaise” (“The Marseillaise”). You can find an anthem in the menu at the top alphabetically or you can use the search feature. This site is focussed on the scholarly study of national anthems ... Feb 13, 2022 ... The 38-year-old country music artist had the honor of singing the National Anthem during this year's big game, and she did not disappoint. Oldest of the World's National Anthems ; France, La Marseillaise (“The Marseillaise”), 1795 ; Argentina, Himno Nacional Argentino (“Argentine National Anthem”) ... Mar 3, 2022 ... Country music star Jessie James Decker gained the respect of music and hockey fans alike after a jaw-dropping rendition of "The Star-Spangled ... This list shows the country on the left, the national anthem in the ... There are many countries over the world who have a national anthem of their own. | https://python.langchain.com/en/latest/modules/memory/examples/agent_with_memory_in_db.html |
Thought: I now know the final answer
Final Answer: The national anthem of [country] is [name of anthem].
> Finished AgentExecutor chain.
'The national anthem of [country] is [name of anthem].'
How to create a custom Memory class#
Although there are a few predefined types of memory in LangChain, it is highly possible you will want to add your own type of memory that is optimal for your application. This notebook covers how to do that.
For this notebook, we will add a custom memory type to ConversationChain. In order to add a custom memory class, we need to import the base memory class and subclass it.
from langchain import OpenAI, ConversationChain
from langchain.schema import BaseMemory
from pydantic import BaseModel
from typing import List, Dict, Any
In this example, we will write a custom memory class that uses spacy to extract entities and save information about them in a simple hash table. Then, during the conversation, we will look at the input text, extract any entities, and put any information about them into the context.
Please note that this implementation is pretty simple and brittle and probably not useful in a production setting. Its purpose is to showcase that you can add custom memory implementations.
For this, we will need spacy.
# !pip install spacy
# !python -m spacy download en_core_web_lg
import spacy
nlp = spacy.load('en_core_web_lg')
class SpacyEntityMemory(BaseMemory, BaseModel):
"""Memory class for storing information about entities."""
# Define dictionary to store information about entities.
entities: dict = {}
# Define key to pass information about entities into prompt.
memory_key: str = "entities"
def clear(self):
self.entities = {}
@property
def memory_variables(self) -> List[str]:
"""Define the variables we are providing to the prompt."""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load the memory variables, in this case the entity key."""
# Get the input text and run through spacy
doc = nlp(inputs[list(inputs.keys())[0]])
# Extract known information about entities, if they exist.
entities = [self.entities[str(ent)] for ent in doc.ents if str(ent) in self.entities]
# Return combined information about entities to put into context.
return {self.memory_key: "\n".join(entities)}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
# Get the input text and run through spacy
text = inputs[list(inputs.keys())[0]]
doc = nlp(text)
# For each entity that was mentioned, save this information to the dictionary.
for ent in doc.ents:
ent_str = str(ent)
if ent_str in self.entities:
self.entities[ent_str] += f"\n{text}"
else:
self.entities[ent_str] = text
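Before wiring the class into a chain, it can be exercised on its own to check that entities are being captured. A quick, hypothetical sanity check (the exact result depends on spaCy's entity recognition):
entity_memory = SpacyEntityMemory()
entity_memory.save_context({"input": "Harrison enjoys rock climbing"}, {"output": "noted"})
# Should return the saved sentence under the "entities" key, since "Harrison" is recognized again
entity_memory.load_memory_variables({"input": "What does Harrison enjoy?"})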
We now define a prompt that takes in information about entities as well as user input
from langchain.prompts.prompt import PromptTemplate
template = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. You are provided with information about entities the Human mentions, if relevant.
Relevant entity information:
{entities}
Conversation:
Human: {input}
AI:"""
prompt = PromptTemplate(
input_variables=["entities", "input"], template=template
)
And now we put it all together!
llm = OpenAI(temperature=0)
conversation = ConversationChain(llm=llm, prompt=prompt, verbose=True, memory=SpacyEntityMemory())
In the first example, with no prior knowledge about Harrison, the “Relevant entity information” section is empty.
conversation.predict(input="Harrison likes machine learning")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. You are provided with information about entities the Human mentions, if relevant.
Relevant entity information:
Conversation:
Human: Harrison likes machine learning
AI:
> Finished ConversationChain chain.
" That's great to hear! Machine learning is a fascinating field of study. It involves using algorithms to analyze data and make predictions. Have you ever studied machine learning, Harrison?"
Now in the second example, we can see that it pulls in information about Harrison.
conversation.predict(input="What do you think Harrison's favorite subject in college was?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know. You are provided with information about entities the Human mentions, if relevant.
Relevant entity information:
Harrison likes machine learning
Conversation:
Human: What do you think Harrison's favorite subject in college was?
AI:
> Finished ConversationChain chain.
' From what I know about Harrison, I believe his favorite subject in college was machine learning. He has expressed a strong interest in the subject and has mentioned it often.'
Again, please note that this implementation is pretty simple and brittle and probably not useful in a production setting. Its purpose is to showcase that you can add custom memory implementations.
How to customize conversational memory#
This notebook walks through a few ways to customize conversational memory.
from langchain.llms import OpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
llm = OpenAI(temperature=0)
AI Prefix#
The first way to do so is by changing the AI prefix in the conversation summary. By default, this is set to “AI”, but you can set it to anything you want. Note that if you change this, you should also change the prompt used in the chain to reflect the new name. Let’s walk through an example below.
# Here it is by default set to "AI"
conversation = ConversationChain(
llm=llm,
verbose=True,
memory=ConversationBufferMemory()
)
conversation.predict(input="Hi there!")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi there!
AI:
> Finished ConversationChain chain.
" Hi there! It's nice to meet you. How can I help you today?"
conversation.predict(input="What's the weather?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi there!
AI: Hi there! It's nice to meet you. How can I help you today?
Human: What's the weather?
AI:
> Finished ConversationChain chain.
' The current weather is sunny and warm with a temperature of 75 degrees Fahrenheit. The forecast for the next few days is sunny with temperatures in the mid-70s.'
# Now we can override it and set it to "AI Assistant"
from langchain.prompts.prompt import PromptTemplate
template = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{history}
Human: {input}
AI Assistant:"""
PROMPT = PromptTemplate(
input_variables=["history", "input"], template=template
)
conversation = ConversationChain(
prompt=PROMPT,
llm=llm,
verbose=True,
memory=ConversationBufferMemory(ai_prefix="AI Assistant")
)
conversation.predict(input="Hi there!")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi there!
AI Assistant:
> Finished ConversationChain chain.
" Hi there! It's nice to meet you. How can I help you today?"
conversation.predict(input="What's the weather?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Human: Hi there!
AI Assistant: Hi there! It's nice to meet you. How can I help you today?
Human: What's the weather?
AI Assistant:
> Finished ConversationChain chain.
' The current weather is sunny and warm with a temperature of 75 degrees Fahrenheit. The forecast for the rest of the day is sunny with a high of 78 degrees and a low of 65 degrees.'
Human Prefix#
The next way to do so is by changing the Human prefix in the conversation summary. By default, this is set to “Human”, but you can set it to anything you want. Note that if you change this, you should also change the prompt used in the chain to reflect the new name. Let’s walk through an example below.
# Now we can override it and set it to "Friend"
from langchain.prompts.prompt import PromptTemplate
template = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{history}
Friend: {input}
AI:"""
PROMPT = PromptTemplate(
input_variables=["history", "input"], template=template
)
conversation = ConversationChain(
prompt=PROMPT,
llm=llm,
verbose=True,
memory=ConversationBufferMemory(human_prefix="Friend")
)
conversation.predict(input="Hi there!")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Friend: Hi there!
AI:
> Finished ConversationChain chain.
" Hi there! It's nice to meet you. How can I help you today?"
conversation.predict(input="What's the weather?")
> Entering new ConversationChain chain...
Prompt after formatting:
The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
Friend: Hi there!
AI: Hi there! It's nice to meet you. How can I help you today?
Friend: What's the weather?
AI:
> Finished ConversationChain chain.
' The weather right now is sunny and warm with a temperature of 75 degrees Fahrenheit. The forecast for the rest of the day is mostly sunny with a high of 82 degrees.'
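Both customizations can be combined. The sketch below is not in the original notebook; it reuses the llm and imports defined above, and relies on ConversationBufferMemory accepting both the ai_prefix and human_prefix keyword arguments shown separately in the two examples.
template = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{history}
Friend: {input}
AI Assistant:"""
PROMPT = PromptTemplate(
    input_variables=["history", "input"], template=template
)
conversation = ConversationChain(
    prompt=PROMPT,
    llm=llm,
    verbose=True,
    memory=ConversationBufferMemory(ai_prefix="AI Assistant", human_prefix="Friend")
)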
How to add Memory to an LLMChain#
This notebook goes over how to use the Memory class with an LLMChain. For the purposes of this walkthrough, we will add the ConversationBufferMemory class, although this can be any memory class.
from langchain.memory import ConversationBufferMemory
from langchain import OpenAI, LLMChain, PromptTemplate
The most important step is setting up the prompt correctly. In the below prompt, we have two input keys: one for the actual input, another for the input from the Memory class. Importantly, we make sure the keys in the PromptTemplate and the ConversationBufferMemory match up (chat_history).
template = """You are a chatbot having a conversation with a human.
{chat_history}
Human: {human_input}
Chatbot:"""
prompt = PromptTemplate(
input_variables=["chat_history", "human_input"],
template=template
)
memory = ConversationBufferMemory(memory_key="chat_history")
llm_chain = LLMChain(
llm=OpenAI(),
prompt=prompt,
verbose=True,
memory=memory,
)
llm_chain.predict(human_input="Hi there my friend")
> Entering new LLMChain chain...
Prompt after formatting:
You are a chatbot having a conversation with a human.
Human: Hi there my friend
Chatbot:
> Finished LLMChain chain.
' Hi there, how are you doing today?'
llm_chain.predict(human_input="Not too bad - how are you?")
> Entering new LLMChain chain...
Prompt after formatting:
You are a chatbot having a conversation with a human.
Human: Hi there my friend
AI: Hi there, how are you doing today?
Human: Not too bad - how are you?
Chatbot:
> Finished LLMChain chain.
" I'm doing great, thank you for asking!"
Cassandra Chat Message History#
This notebook goes over how to use Cassandra to store chat message history.
Cassandra is a distributed database that is well suited for storing large amounts of data.
It is a good choice for storing chat message history because it is easy to scale and can handle a large number of writes.
# List of contact points to try connecting to Cassandra cluster.
contact_points = ["cassandra"]
from langchain.memory import CassandraChatMessageHistory
message_history = CassandraChatMessageHistory(
contact_points=contact_points, session_id="test-session"
)
message_history.add_user_message("hi!")
message_history.add_ai_message("whats up?")
message_history.messages
[HumanMessage(content='hi!', additional_kwargs={}, example=False),
AIMessage(content='whats up?', additional_kwargs={}, example=False)]
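As with the other chat-message-history backends, this object can back a ConversationBufferMemory and be dropped into a chain. A minimal sketch, not from the original notebook, assuming the same OpenAI setup used in the other memory examples:
from langchain.llms import OpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory(chat_memory=message_history)
conversation = ConversationChain(llm=OpenAI(temperature=0), memory=memory)
conversation.predict(input="What was the first thing I said to you?")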
Momento#
This notebook goes over how to use Momento Cache to store chat message history using the MomentoChatMessageHistory class. See the Momento docs for more detail on how to get set up with Momento.
Note that, by default, we will create a cache if one with the given name doesn’t already exist.
You’ll need a Momento auth token to use this class. It can either be passed to a momento.CacheClient if you’d like to instantiate that directly, passed in as the named parameter auth_token to MomentoChatMessageHistory.from_client_params, or set as the environment variable MOMENTO_AUTH_TOKEN.
from datetime import timedelta
from langchain.memory import MomentoChatMessageHistory
session_id = "foo"
cache_name = "langchain"
ttl = timedelta(days=1)
history = MomentoChatMessageHistory.from_client_params(
session_id,
cache_name,
ttl,
)
history.add_user_message("hi!")
history.add_ai_message("whats up?")
history.messages
[HumanMessage(content='hi!', additional_kwargs={}, example=False),
AIMessage(content='whats up?', additional_kwargs={}, example=False)]
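Because the history lives in Momento rather than in process memory, re-attaching to the same session_id later retrieves the persisted messages (within the TTL window). A small sketch, not in the original notebook:
resumed = MomentoChatMessageHistory.from_client_params(
    session_id,
    cache_name,
    timedelta(days=1),
)
print(resumed.messages)  # should include the two messages added above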
Dynamodb Chat Message History#
This notebook goes over how to use Dynamodb to store chat message history.
First make sure you have correctly configured the AWS CLI. Then make sure you have installed boto3.
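For example (the exact commands depend on your environment; shown here only as an illustration):
# !pip install boto3
# Credentials can come from `aws configure` or from the standard AWS environment
# variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_DEFAULT_REGION).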
Next, create the DynamoDB Table where we will be storing messages:
import boto3
# Get the service resource.
dynamodb = boto3.resource('dynamodb')
# Create the DynamoDB table.
table = dynamodb.create_table(
TableName='SessionTable',
KeySchema=[
{
'AttributeName': 'SessionId',
'KeyType': 'HASH'
}
],
AttributeDefinitions=[
{
'AttributeName': 'SessionId',
'AttributeType': 'S'
}
],
BillingMode='PAY_PER_REQUEST',
)
# Wait until the table exists.
table.meta.client.get_waiter('table_exists').wait(TableName='SessionTable')
# Print out some data about the table.
print(table.item_count)
0
DynamoDBChatMessageHistory#
from langchain.memory.chat_message_histories import DynamoDBChatMessageHistory
history = DynamoDBChatMessageHistory(table_name="SessionTable", session_id="0")
history.add_user_message("hi!")
history.add_ai_message("whats up?")
history.messages
[HumanMessage(content='hi!', additional_kwargs={}, example=False),
AIMessage(content='whats up?', additional_kwargs={}, example=False)]
Agent with DynamoDB Memory#
from langchain.agents import Tool
from langchain.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.utilities import PythonREPL
from getpass import getpass
message_history = DynamoDBChatMessageHistory(table_name="SessionTable", session_id="1")
memory = ConversationBufferMemory(memory_key="chat_history", chat_memory=message_history, return_messages=True)
python_repl = PythonREPL()
# You can create the tool to pass to an agent
tools = [Tool(
name="python_repl",
description="A Python shell. Use this to execute python commands. Input should be a valid python command. If you want to see the output of a value, you should print it out with `print(...)`.",
func=python_repl.run
)]
llm=ChatOpenAI(temperature=0)
agent_chain = initialize_agent(tools, llm, agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory)
agent_chain.run(input="Hello!")
> Entering new AgentExecutor chain...
{
"action": "Final Answer",
"action_input": "Hello! How can I assist you today?"
}
> Finished chain.
'Hello! How can I assist you today?'
agent_chain.run(input="Who owns Twitter?")
> Entering new AgentExecutor chain...
{
"action": "python_repl",
"action_input": "import requests\nfrom bs4 import BeautifulSoup\n\nurl = 'https://en.wikipedia.org/wiki/Twitter'\nresponse = requests.get(url)\nsoup = BeautifulSoup(response.content, 'html.parser')\nowner = soup.find('th', text='Owner').find_next_sibling('td').text.strip()\nprint(owner)"
}
Observation: X Corp. (2023–present)Twitter, Inc. (2006–2023)
Thought:{
"action": "Final Answer",
"action_input": "X Corp. (2023–present)Twitter, Inc. (2006–2023)"
}
> Finished chain.
'X Corp. (2023–present)Twitter, Inc. (2006–2023)'
agent_chain.run(input="My name is Bob.")
> Entering new AgentExecutor chain...
{
"action": "Final Answer",
"action_input": "Hello Bob! How can I assist you today?"
}
> Finished chain.
'Hello Bob! How can I assist you today?'
agent_chain.run(input="Who am I?")
> Entering new AgentExecutor chain...
{
"action": "Final Answer",
"action_input": "Your name is Bob."
}
> Finished chain.
'Your name is Bob.'
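Because every message is written to DynamoDB, a later run (or a different process) can resume the same conversation simply by constructing the history with the same table and session id. A small sketch, not in the original notebook:
resumed_history = DynamoDBChatMessageHistory(table_name="SessionTable", session_id="1")
print(resumed_history.messages[-2:])  # the most recent human/AI exchange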
How to add Memory to an Agent#
This notebook goes over adding memory to an Agent. Before going through this notebook, please walk through the following notebooks, as this will build on top of both of them:
Adding memory to an LLM Chain
Custom Agents
In order to add a memory to an agent, we are going to take the following steps:
We are going to create an LLMChain with memory.
We are going to use that LLMChain to create a custom Agent.
For the purposes of this exercise, we are going to create a simple custom Agent that has access to a search tool and utilizes the ConversationBufferMemory class.
from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
from langchain.memory import ConversationBufferMemory
from langchain import OpenAI, LLMChain
from langchain.utilities import GoogleSearchAPIWrapper
search = GoogleSearchAPIWrapper()
tools = [
Tool(
name = "Search",
func=search.run,
description="useful for when you need to answer questions about current events"
)
]
Notice the usage of the chat_history variable in the PromptTemplate, which matches up with the dynamic key name in the ConversationBufferMemory.
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"
{chat_history}
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"]
)
memory = ConversationBufferMemory(memory_key="chat_history")
We can now construct the LLMChain, with the Memory object, and then create the agent.
llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
agent_chain = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, memory=memory)
agent_chain.run(input="How many people live in canada?")
> Entering new AgentExecutor chain...
Thought: I need to find out the population of Canada
Action: Search
Action Input: Population of Canada
Observation: The current population of Canada is 38,566,192 as of Saturday, December 31, 2022, based on Worldometer elaboration of the latest United Nations data. · Canada ... Additional information related to Canadian population trends can be found on Statistics Canada's Population and Demography Portal. Population of Canada (real- ... Index to the latest information from the Census of Population. This survey conducted by Statistics Canada provides a statistical portrait of Canada and its ... 14 records ... Estimated number of persons by quarter of a year and by year, Canada, provinces and territories. The 2021 Canadian census counted a total population of 36,991,981, an increase of around 5.2 percent over the 2016 figure. ... Between 1990 and 2008, the ... ( 2 ) Census reports and other statistical publications from national statistical offices, ( 3 ) Eurostat: Demographic Statistics, ( 4 ) United Nations ... Canada is a country in North America. Its ten provinces and three territories extend from ... Population. • Q4 2022 estimate. 39,292,355 (37th). Information is available for the total Indigenous population and each of the three ... The term 'Aboriginal' or 'Indigenous' used on the Statistics Canada ... Jun 14, 2022 ... Determinants of health are the broad range of personal, social, economic and environmental factors that determine individual and population ... COVID-19 vaccination coverage across Canada by demographics and key populations. Updated every Friday at 12:00 PM Eastern Time.
Thought: I now know the final answer
Final Answer: The current population of Canada is 38,566,192 as of Saturday, December 31, 2022, based on Worldometer elaboration of the latest United Nations data.
> Finished AgentExecutor chain.
'The current population of Canada is 38,566,192 as of Saturday, December 31, 2022, based on Worldometer elaboration of the latest United Nations data.'
To test the memory of this agent, we can ask a followup question that relies on information in the previous exchange to be answered correctly.
agent_chain.run(input="what is their national anthem called?")
> Entering new AgentExecutor chain...
Thought: I need to find out what the national anthem of Canada is called.
Action: Search
Action Input: National Anthem of Canada
Observation: Jun 7, 2010 ... https://twitter.com/CanadaImmigrantCanadian National Anthem O Canada in HQ - complete with lyrics, captions, vocals & music.LYRICS:O Canada! Nov 23, 2022 ... After 100 years of tradition, O Canada was proclaimed Canada's national anthem in 1980. The music for O Canada was composed in 1880 by Calixa ... O Canada, national anthem of Canada. It was proclaimed the official national anthem on July 1, 1980. “God Save the Queen” remains the royal anthem of Canada ... O Canada! Our home and native land! True patriot love in all of us command. Car ton bras sait porter l'épée,. Il sait porter la croix! "O Canada" (French: Ô Canada) is the national anthem of Canada. The song was originally commissioned by Lieutenant Governor of Quebec Théodore Robitaille ... Feb 1, 2018 ... It was a simple tweak — just two words. But with that, Canada just voted to make its national anthem, “O Canada,” gender neutral, ... "O Canada" was proclaimed Canada's national anthem on July 1,. 1980, 100 years after it was first sung on June 24, 1880. The music. Patriotic music in Canada dates back over 200 years as a distinct category from British or French patriotism, preceding the first legal steps to ... Feb 4, 2022 ... English version: O Canada! Our home and native land! True patriot love in all of us command. With glowing hearts we ... Feb 1, 2018 ... Canada's Senate has passed a bill making the country's national anthem gender-neutral. If you're not familiar with the words to “O Canada,” ...
Thought: I now know the final answer.
Final Answer: The national anthem of Canada is called "O Canada".
> Finished AgentExecutor chain.
'The national anthem of Canada is called "O Canada".'
We can see that the agent remembered that the previous question was about Canada, and properly asked Google Search what the name of Canada’s national anthem was.
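Under the hood this works because the ConversationBufferMemory fills the {chat_history} slot in the prompt with the earlier exchange. You can check what will be injected (a small addition, not in the original notebook):
print(memory.load_memory_variables({})["chat_history"])
# Human: How many people live in canada?
# AI: The current population of Canada is 38,566,192 ...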
For fun, let’s compare this to an agent that does NOT have memory.
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "agent_scratchpad"]
)
llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
agent_without_memory = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
agent_without_memory.run("How many people live in canada?")
> Entering new AgentExecutor chain...
Thought: I need to find out the population of Canada
Action: Search
Action Input: Population of Canada
Observation: The current population of Canada is 38,566,192 as of Saturday, December 31, 2022, based on Worldometer elaboration of the latest United Nations data. · Canada ... Additional information related to Canadian population trends can be found on Statistics Canada's Population and Demography Portal. Population of Canada (real- ... Index to the latest information from the Census of Population. This survey conducted by Statistics Canada provides a statistical portrait of Canada and its ... 14 records ... Estimated number of persons by quarter of a year and by year, Canada, provinces and territories. The 2021 Canadian census counted a total population of 36,991,981, an increase of around 5.2 percent over the 2016 figure. ... Between 1990 and 2008, the ... ( 2 ) Census reports and other statistical publications from national statistical offices, ( 3 ) Eurostat: Demographic Statistics, ( 4 ) United Nations ... Canada is a country in North America. Its ten provinces and three territories extend from ... Population. • Q4 2022 estimate. 39,292,355 (37th). Information is available for the total Indigenous population and each of the three ... The term 'Aboriginal' or 'Indigenous' used on the Statistics Canada ... Jun 14, 2022 ... Determinants of health are the broad range of personal, social, economic and environmental factors that determine individual and population ... COVID-19 vaccination coverage across Canada by demographics and key populations. Updated every Friday at 12:00 PM Eastern Time.
Thought: I now know the final answer
Final Answer: The current population of Canada is 38,566,192 as of Saturday, December 31, 2022, based on Worldometer elaboration of the latest United Nations data.
> Finished AgentExecutor chain.
'The current population of Canada is 38,566,192 as of Saturday, December 31, 2022, based on Worldometer elaboration of the latest United Nations data.'
agent_without_memory.run("what is their national anthem called?")
> Entering new AgentExecutor chain...
Thought: I should look up the answer
Action: Search
Action Input: national anthem of [country]
Observation: Most nation states have an anthem, defined as "a song, as of praise, devotion, or patriotism"; most anthems are either marches or hymns in style. List of all countries around the world with its national anthem. ... Title and lyrics in the language of the country and translated into English, Aug 1, 2021 ... 1. Afghanistan, "Milli Surood" (National Anthem) · 2. Armenia, "Mer Hayrenik" (Our Fatherland) · 3. Azerbaijan (a transcontinental country with ... A national anthem is a patriotic musical composition symbolizing and evoking eulogies of the history and traditions of a country or nation. National Anthem of Every Country ; Fiji, “Meda Dau Doka” (“God Bless Fiji”) ; Finland, “Maamme”. (“Our Land”) ; France, “La Marseillaise” (“The Marseillaise”). You can find an anthem in the menu at the top alphabetically or you can use the search feature. This site is focussed on the scholarly study of national anthems ... Feb 13, 2022 ... The 38-year-old country music artist had the honor of singing the National Anthem during this year's big game, and she did not disappoint. Oldest of the World's National Anthems ; France, La Marseillaise (“The Marseillaise”), 1795 ; Argentina, Himno Nacional Argentino (“Argentine National Anthem”) ... Mar 3, 2022 ... Country music star Jessie James Decker gained the respect of music and hockey fans alike after a jaw-dropping rendition of "The Star-Spangled ... This list shows the country on the left, the national anthem in the ... There are many countries over the world who have a national anthem of their own.
Thought: I now know the final answer
Final Answer: The national anthem of [country] is [name of anthem].
> Finished AgentExecutor chain.
'The national anthem of [country] is [name of anthem].'
Redis Chat Message History#
This notebook goes over how to use Redis to store chat message history.
from langchain.memory import RedisChatMessageHistory
history = RedisChatMessageHistory("foo")
history.add_user_message("hi!")
history.add_ai_message("whats up?")
history.messages
[AIMessage(content='whats up?', additional_kwargs={}),
HumanMessage(content='hi!', additional_kwargs={})]
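By default the class connects to a local Redis instance; the constructor also accepts connection details. The sketch below is not from the original notebook, and the url and ttl keyword arguments are assumptions based on this class's constructor that may differ across versions:
history = RedisChatMessageHistory(
    session_id="foo",
    url="redis://localhost:6379/0",  # point at your Redis instance
    ttl=600,  # optionally expire the session after 10 minutes
)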
Zep Memory#
REACT Agent Chat Message History Example#
This notebook demonstrates how to use the Zep Long-term Memory Store as memory for your chatbot.
We’ll demonstrate:
Adding conversation history to the Zep memory store.
Running an agent and having messages automatically added to the store.
Viewing the enriched messages.
Vector search over the conversation history.
More on Zep:
Zep stores, summarizes, embeds, indexes, and enriches conversational AI chat histories, and exposes them via simple, low-latency APIs.
Key Features:
Long-term memory persistence, with access to historical messages irrespective of your summarization strategy.
Auto-summarization of memory messages based on a configurable message window. A series of summaries are stored, providing flexibility for future summarization strategies.
Vector search over memories, with messages automatically embedded on creation.
Auto-token counting of memories and summaries, allowing finer-grained control over prompt assembly.
Python and JavaScript SDKs.
Zep project: getzep/zep
Docs: https://getzep.github.io
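To run this walkthrough you will also need the Zep Python client installed and a Zep server reachable at the URL configured below. The package name is an assumption; check the Zep docs if it has changed.
# !pip install zep-python  # assumed package name for the Zep client SDK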
from langchain.memory.chat_message_histories import ZepChatMessageHistory
from langchain.memory import ConversationBufferMemory
from langchain import OpenAI
from langchain.schema import HumanMessage, AIMessage
from langchain.tools import DuckDuckGoSearchRun
from langchain.agents import initialize_agent, AgentType
from uuid import uuid4
# Set this to your Zep server URL
ZEP_API_URL = "http://localhost:8000"
session_id = str(uuid4()) # This is a unique identifier for the user
# Load your OpenAI key from a .env file
from dotenv import load_dotenv
load_dotenv()
True
Initialize the Zep Chat Message History Class and initialize the Agent#
ddg = DuckDuckGoSearchRun()
tools = [ddg]
# Set up Zep Chat History
zep_chat_history = ZepChatMessageHistory(
session_id=session_id,
url=ZEP_API_URL,
)
# Use a standard ConversationBufferMemory to encapsulate the Zep chat history
memory = ConversationBufferMemory(
memory_key="chat_history", chat_memory=zep_chat_history
)
# Initialize the agent
llm = OpenAI(temperature=0)
agent_chain = initialize_agent(
tools,
llm,
agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
verbose=True,
memory=memory,
)
Add some history data#
# Preload some messages into the memory. The default message window is 12 messages. We want to push beyond this to demonstrate auto-summarization.
test_history = [
{"role": "human", "content": "Who was Octavia Butler?"},
{
"role": "ai",
"content": (
"Octavia Estelle Butler (June 22, 1947 – February 24, 2006) was an American"
" science fiction author."
),
},
{"role": "human", "content": "Which books of hers were made into movies?"},
{
"role": "ai",
"content": (
"The most well-known adaptation of Octavia Butler's work is the FX series" | https://python.langchain.com/en/latest/modules/memory/examples/zep_memory.html |