to get your token, and create a v2 version of the app.

classmethod from_bearer_token(oauth2_bearer_token: str, twitter_users: Sequence[str], number_tweets: Optional[int] = 100) → langchain.document_loaders.twitter.TwitterTweetLoader
Create a TwitterTweetLoader from an OAuth2 bearer token.

classmethod from_secrets(access_token: str, access_token_secret: str, consumer_key: str, consumer_secret: str, twitter_users: Sequence[str], number_tweets: Optional[int] = 100) → langchain.document_loaders.twitter.TwitterTweetLoader
Create a TwitterTweetLoader from access tokens and secrets.

load() → List[langchain.schema.Document]
Load tweets.
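Example (a minimal sketch; the bearer token and user handle are placeholders, not values from this reference):
from langchain.document_loaders import TwitterTweetLoader
loader = TwitterTweetLoader.from_bearer_token(
    oauth2_bearer_token="YOUR_BEARER_TOKEN",  # hypothetical credential
    twitter_users=["hwchase17"],
    number_tweets=50,
)
docs = loader.load()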
class langchain.document_loaders.UnstructuredEPubLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)
Loader that uses unstructured to load EPUB files.

class langchain.document_loaders.UnstructuredEmailLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)
Loader that uses unstructured to load email files.

class langchain.document_loaders.UnstructuredFileIOLoader(file: IO, mode: str = 'single', **unstructured_kwargs: Any)
Loader that uses unstructured to load file IO objects.

class langchain.document_loaders.UnstructuredFileLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)
Loader that uses unstructured to load files.

class langchain.document_loaders.UnstructuredHTMLLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)
Loader that uses unstructured to load HTML files.

class langchain.document_loaders.UnstructuredImageLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)
Loader that uses unstructured to load image files, such as PNGs and JPGs.

class langchain.document_loaders.UnstructuredMarkdownLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)
Loader that uses unstructured to load Markdown files.

class langchain.document_loaders.UnstructuredPDFLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)
Loader that uses unstructured to load PDF files.

class langchain.document_loaders.UnstructuredPowerPointLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)
Loader that uses unstructured to load PowerPoint files.
class langchain.document_loaders.UnstructuredRTFLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)
Loader that uses unstructured to load RTF files.

class langchain.document_loaders.UnstructuredURLLoader(urls: List[str], continue_on_failure: bool = True, **unstructured_kwargs: Any)
Loader that uses unstructured to load HTML files from URLs.

load() → List[langchain.schema.Document]
Load file.

class langchain.document_loaders.UnstructuredWordDocumentLoader(file_path: str, mode: str = 'single', **unstructured_kwargs: Any)
Loader that uses unstructured to load Word documents.
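The unstructured-based loaders above share the same constructor. Example (a minimal sketch; the file path is a placeholder):
from langchain.document_loaders import UnstructuredPDFLoader
# mode="elements" returns one Document per detected element;
# the default mode="single" concatenates everything into one Document.
loader = UnstructuredPDFLoader("example.pdf", mode="elements")
docs = loader.load()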
class langchain.document_loaders.WebBaseLoader(web_path: Union[str, List[str]], header_template: Optional[dict] = None)
Loader that uses urllib and BeautifulSoup to load webpages.

aload() → List[langchain.schema.Document]
Load text from the urls in web_path asynchronously into Documents.

default_parser: str = 'html.parser'
Default parser to use for BeautifulSoup.

async fetch_all(urls: List[str]) → Any
Fetch all urls concurrently with rate limiting.

load() → List[langchain.schema.Document]
Load text from the url(s) in web_path.

requests_per_second: int = 2
Max number of concurrent requests to make.

scrape(parser: Optional[str] = None) → Any
Scrape data from webpage and return it in BeautifulSoup format.

scrape_all(urls: List[str], parser: Optional[str] = None) → List[Any]
Fetch all urls, then return soups for all results.

property web_path: str
web_paths: List[str]

class langchain.document_loaders.WhatsAppChatLoader(path: str)
Loader that loads a WhatsApp messages text file.

load() → List[langchain.schema.Document]
Load documents.

class langchain.document_loaders.YoutubeLoader(video_id: str, add_video_info: bool = False, language: str = 'en', continue_on_failure: bool = False)
Loader that loads YouTube transcripts.

classmethod from_youtube_url(youtube_url: str, **kwargs: Any) → langchain.document_loaders.youtube.YoutubeLoader
Given a YouTube URL, construct a loader for the video.

load() → List[langchain.schema.Document]
Load documents.
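Example (a minimal sketch of WebBaseLoader and YoutubeLoader; the URLs are placeholders):
from langchain.document_loaders import WebBaseLoader, YoutubeLoader
web_docs = WebBaseLoader(["https://example.com/a", "https://example.com/b"]).load()
yt_loader = YoutubeLoader.from_youtube_url("https://www.youtube.com/watch?v=VIDEO_ID")
yt_docs = yt_loader.load()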
Utilities

General utilities.

pydantic model langchain.utilities.ApifyWrapper
Wrapper around Apify. To use, you should have the apify-client python package installed, and the environment variable APIFY_API_TOKEN set with your API key, or pass apify_api_token as a named parameter to the constructor.

field apify_client: Any = None
field apify_client_async: Any = None

async acall_actor(actor_id: str, run_input: Dict, dataset_mapping_function: Callable[[Dict], langchain.schema.Document], *, build: Optional[str] = None, memory_mbytes: Optional[int] = None, timeout_secs: Optional[int] = None) → langchain.document_loaders.apify_dataset.ApifyDatasetLoader
Run an Actor on the Apify platform and wait for results to be ready.
Parameters:
  actor_id (str) – The ID or name of the Actor on the Apify platform.
  run_input (Dict) – The input object of the Actor that you're trying to run.
  dataset_mapping_function (Callable) – A function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class.
  build (str, optional) – Optionally specifies the Actor build to run. It can be either a build tag or build number.
  memory_mbytes (int, optional) – Optional memory limit for the run, in megabytes.
  timeout_secs (int, optional) – Optional timeout for the run, in seconds.
Returns: A loader that will fetch the records from the Actor run's default dataset.
Return type: ApifyDatasetLoader

call_actor(actor_id: str, run_input: Dict, dataset_mapping_function: Callable[[Dict], langchain.schema.Document], *, build: Optional[str] = None, memory_mbytes: Optional[int] = None, timeout_secs: Optional[int] = None) → langchain.document_loaders.apify_dataset.ApifyDatasetLoader
Run an Actor on the Apify platform and wait for results to be ready.
Parameters:
  actor_id (str) – The ID or name of the Actor on the Apify platform.
  run_input (Dict) – The input object of the Actor that you're trying to run.
  dataset_mapping_function (Callable) – A function that takes a single dictionary (an Apify dataset item) and converts it to an instance of the Document class.
  build (str, optional) – Optionally specifies the Actor build to run. It can be either a build tag or build number.
  memory_mbytes (int, optional) – Optional memory limit for the run, in megabytes.
  timeout_secs (int, optional) – Optional timeout for the run, in seconds.
Returns: A loader that will fetch the records from the Actor run's default dataset.
Return type: ApifyDatasetLoader
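Example (a minimal sketch; the Actor ID and run input follow Apify's public website-content-crawler Actor and are illustrative):
from langchain.schema import Document
from langchain.utilities import ApifyWrapper
apify = ApifyWrapper()  # reads APIFY_API_TOKEN from the environment
loader = apify.call_actor(
    actor_id="apify/website-content-crawler",
    run_input={"startUrls": [{"url": "https://example.com"}]},
    dataset_mapping_function=lambda item: Document(
        page_content=item.get("text", ""),
        metadata={"source": item.get("url", "")},
    ),
)
docs = loader.load()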
pydantic model langchain.utilities.ArxivAPIWrapper
Wrapper around the Arxiv API. To use, you should have the arxiv python package installed. https://lukasschwab.me/arxiv.py/index.html This wrapper will use the Arxiv API to conduct searches and fetch document summaries. By default, it will return the document summaries of the top-k results of an input search.

field arxiv_exceptions: Any = None
field top_k_results: int = 3

run(query: str) → str
Run an Arxiv search and get the document meta information. See https://lukasschwab.me/arxiv.py/index.html#Search and https://lukasschwab.me/arxiv.py/index.html#Result. It uses only the most informative fields of the document meta information.

class langchain.utilities.BashProcess(strip_newlines: bool = False, return_err_output: bool = False)
Executes bash commands and returns the output.

run(commands: Union[str, List[str]]) → str
Run commands and return final output.
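Example (a minimal sketch of ArxivAPIWrapper and BashProcess; the Arxiv call needs the arxiv package installed):
from langchain.utilities import ArxivAPIWrapper, BashProcess
arxiv = ArxivAPIWrapper(top_k_results=2)
summaries = arxiv.run("attention is all you need")
bash = BashProcess()
output = bash.run(["echo hello", "pwd"])  # returns the final output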
pydantic model langchain.utilities.BingSearchAPIWrapper
Wrapper for the Bing Search API. In order to set this up, follow the instructions at: https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e

field bing_search_url: str [Required]
field bing_subscription_key: str [Required]
field k: int = 10

results(query: str, num_results: int) → List[Dict]
Run query through BingSearch and return metadata.
Parameters:
  query – The query to search for.
  num_results – The number of results to return.
Returns:
  snippet – The description of the result.
  title – The title of the result.
  link – The link to the result.
Return type: A list of dictionaries with the above keys.

run(query: str) → str
Run query through BingSearch and parse result.
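Example (a minimal sketch; the subscription key is a placeholder, and the endpoint is the standard Bing v7 search URL, not taken from this reference):
from langchain.utilities import BingSearchAPIWrapper
bing = BingSearchAPIWrapper(
    bing_subscription_key="YOUR_SUBSCRIPTION_KEY",
    bing_search_url="https://api.bing.microsoft.com/v7.0/search",
)
answer = bing.run("python langchain")
metadata = bing.results("python langchain", num_results=3)  # snippet/title/link dicts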
pydantic model langchain.utilities.GooglePlacesAPIWrapper
Wrapper around the Google Places API. To use, you should have the googlemaps python package installed, an API key for the Google Maps platform, and the environment variable GPLACES_API_KEY set with your API key, or pass gplaces_api_key as a named parameter to the constructor. By default, this will return all the results on the input query. You can use the top_k_results argument to limit the number of results.

Example
from langchain import GooglePlacesAPIWrapper
gplaceapi = GooglePlacesAPIWrapper()

field gplaces_api_key: Optional[str] = None
field top_k_results: Optional[int] = None

fetch_place_details(place_id: str) → Optional[str]

format_place_details(place_details: Dict[str, Any]) → Optional[str]

run(query: str) → str
Run a Places search and get the k places that match.

pydantic model langchain.utilities.GoogleSearchAPIWrapper
Wrapper for the Google Search API. Instructions adapted from https://stackoverflow.com/questions/37083058/programmatically-searching-google-in-python-using-custom-search

1. Install google-api-python-client
- If you don't already have a Google account, sign up.
- If you have never created a Google APIs Console project, read the Managing Projects page and create a project in the Google API Console.
- Install the library using pip install google-api-python-client. The current version of the library is 2.70.0 at this time.

2. To create an API key:
- Navigate to the APIs & Services → Credentials panel in Cloud Console.
- Select Create credentials, then select API key from the drop-down menu.
- The API key created dialog box displays your newly created key. You now have an API_KEY.

3. Set up a Custom Search Engine so you can search the entire web:
- Create a custom search engine in this link.
- In Sites to search, add any valid URL (i.e. www.stackoverflow.com).
- That's all you have to fill out; the rest doesn't matter.
- In the left-side menu, click Edit search engine → {your search engine name} → Setup. Set Search the entire web to ON. Remove the URL you added from the list of Sites to search. Under Search engine ID you'll find the search-engine-ID.

4. Enable the Custom Search API:
- Navigate to the APIs & Services → Dashboard panel in Cloud Console.
- Click Enable APIs and Services.
- Search for Custom Search API and click on it.
- Click Enable. URL for it: https://console.cloud.google.com/apis/library/customsearch.googleapis.com
field google_api_key: Optional[str] = None
field google_cse_id: Optional[str] = None
field k: int = 10
field siterestrict: bool = False

results(query: str, num_results: int) → List[Dict]
Run query through GoogleSearch and return metadata.
Parameters:
  query – The query to search for.
  num_results – The number of results to return.
Returns:
  snippet – The description of the result.
  title – The title of the result.
  link – The link to the result.
Return type: A list of dictionaries with the above keys.

run(query: str) → str
Run query through GoogleSearch and parse result.
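Example (a minimal sketch; assumes the google-api-python-client package and, per LangChain's conventions, the GOOGLE_API_KEY and GOOGLE_CSE_ID environment variables from the steps above):
from langchain.utilities import GoogleSearchAPIWrapper
search = GoogleSearchAPIWrapper(k=5)
answer = search.run("langchain document loaders")
metadata = search.results("langchain document loaders", num_results=3)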
pydantic model langchain.utilities.GoogleSerperAPIWrapper
Wrapper around the Serper.dev Google Search API. You can create a free API key at https://serper.dev. To use, you should have the environment variable SERPER_API_KEY set with your API key, or pass serper_api_key as a named parameter to the constructor.

Example
from langchain import GoogleSerperAPIWrapper
google_serper = GoogleSerperAPIWrapper()

field gl: str = 'us'
field hl: str = 'en'
field k: int = 10
field serper_api_key: Optional[str] = None

run(query: str) → str
Run query through GoogleSearch and parse result.

pydantic model langchain.utilities.OpenWeatherMapAPIWrapper
Wrapper for the OpenWeatherMap API using PyOWM.

Docs for using:
- Go to OpenWeatherMap and sign up for an API key
- Save your API key into the OPENWEATHERMAP_API_KEY env variable
- pip install pyowm

field openweathermap_api_key: Optional[str] = None
field owm: Any = None

run(location: str) → str
Get the current weather information for a specified location.

pydantic model langchain.utilities.PowerBIDataset
Create a PowerBI engine from a dataset ID and credential or token. Use either the credential or a supplied token to authenticate. If both are supplied, the credential is used to generate a token. The impersonated_user_name is the UPN of a user to be impersonated. If the model is not RLS enabled, this will be ignored.

field aiosession: Optional[aiohttp.ClientSession] = None
field credential: Optional[Union[ChainedTokenCredential, InteractiveCredential]] = None
field dataset_id: str [Required]
field group_id: Optional[str] = None
field impersonated_user_name: Optional[str] = None
field sample_rows_in_table_info: int = 1
Constraints: exclusiveMinimum = 0, maximum = 10
field schemas: Dict[str, str] [Optional]
field table_names: List[str] [Required]
field token: Optional[str] = None

async aget_table_info(table_names: Optional[Union[List[str], str]] = None) → str
Get information about specified tables.

async arun(command: str) → Any
Execute a DAX command and return the result asynchronously.

get_schemas() → str
Get the available schemas.

get_table_info(table_names: Optional[Union[List[str], str]] = None) → str
Get information about specified tables.

get_table_names() → Iterable[str]
Get names of tables available.

run(command: str) → Any
Execute a DAX command and return a JSON representation of the results.

property headers: Dict[str, str]
Get the token.

property request_url: str
Get the request url.

property table_info: str
Information about all tables in the database.
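Example (a minimal sketch; the dataset ID, table name, and bearer token are placeholders, and the DAX string is illustrative):
from langchain.utilities import PowerBIDataset
powerbi = PowerBIDataset(
    dataset_id="YOUR_DATASET_ID",
    table_names=["Sales"],
    token="YOUR_BEARER_TOKEN",
)
info = powerbi.get_table_info(["Sales"])
result = powerbi.run('EVALUATE ROW("count", COUNTROWS(Sales))')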
pydantic model langchain.utilities.PythonREPL
Simulates a standalone Python REPL.

field globals: Optional[Dict] [Optional] (alias '_globals')
field locals: Optional[Dict] [Optional] (alias '_locals')

run(command: str) → str
Run the command with its own globals/locals and return anything printed.
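Example (a minimal sketch):
from langchain.utilities import PythonREPL
repl = PythonREPL()
result = repl.run("x = 2 + 2\nprint(x)")  # "4"; only printed output is captured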
pydantic model langchain.utilities.SearxSearchWrapper
Wrapper for the Searx API. To use, you need to provide the Searx host by passing the named parameter searx_host or exporting the environment variable SEARX_HOST. In some situations you might want to disable SSL verification, for example if you are running Searx locally. You can do this by passing the named parameter unsecure. You can also pass the host URL scheme as http to disable SSL.

Example
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://localhost:8888")

Example with SSL disabled:
from langchain.utilities import SearxSearchWrapper
# note the unsecure parameter is not needed if you pass the url scheme as http
searx = SearxSearchWrapper(searx_host="http://localhost:8888", unsecure=True)

Validators: disable_ssl_warnings » unsecure; validate_params » all fields

field aiosession: Optional[Any] = None
field categories: Optional[List[str]] = []
field engines: Optional[List[str]] = []
field headers: Optional[dict] = None
field k: int = 10
field params: dict [Optional]
field query_suffix: Optional[str] = ''
field searx_host: str = ''
field unsecure: bool = False

async aresults(query: str, num_results: int, engines: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → List[Dict]
Asynchronously query with JSON results. Uses aiohttp. See results for more info.

async arun(query: str, engines: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → str
Asynchronous version of run.

results(query: str, num_results: int, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → List[Dict]
Run query through the Searx API and return the results with metadata.
Parameters:
  query – The query to search for.
  query_suffix – Extra suffix appended to the query.
  num_results – Limit the number of results to return.
  engines – List of engines to use for the query.
  categories – List of categories to use for the query.
  **kwargs – extra parameters to pass to the Searx API.
Returns: A dict with the following keys:
  snippet: The description of the result.
  title: The title of the result.
  link: The link to the result.
  engines: The engines used for the result.
  category: Searx category of the result.

run(query: str, engines: Optional[List[str]] = None, categories: Optional[List[str]] = None, query_suffix: Optional[str] = '', **kwargs: Any) → str
Run query through the Searx API and parse results. You can pass any other params to the Searx query API.
Parameters:
  query – The query to search for.
  query_suffix – Extra suffix appended to the query.
  engines – List of engines to use for the query.
  categories – List of categories to use for the query.
  **kwargs – extra parameters to pass to the Searx API.
Returns: The result of the query.
Return type: str
Raises: ValueError – If an error occurred with the query.

Example
This will make a query to the qwant engine:
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://my.searx.host")
searx.run("what is the weather in France ?", engine="qwant")
# the same result can be achieved using the `!` syntax of searx
# to select the engine using `query_suffix`
searx.run("what is the weather in France ?", query_suffix="!qwant")

pydantic model langchain.utilities.SerpAPIWrapper
Wrapper around SerpAPI. To use, you should have the google-search-results python package installed, and the environment variable SERPAPI_API_KEY set with your API key, or pass serpapi_api_key as a named parameter to the constructor.

Example
from langchain import SerpAPIWrapper
serpapi = SerpAPIWrapper()

field aiosession: Optional[aiohttp.client.ClientSession] = None
field params: dict = {'engine': 'google', 'gl': 'us', 'google_domain': 'google.com', 'hl': 'en'}
field serpapi_api_key: Optional[str] = None

async arun(query: str) → str
Use aiohttp to run query through SerpAPI and parse result.

get_params(query: str) → Dict[str, str]
Get parameters for SerpAPI.

results(query: str) → dict
Run query through SerpAPI and return the raw result.

run(query: str) → str
Run query through SerpAPI and parse result.

pydantic model langchain.utilities.TextRequestsWrapper
Lightweight wrapper around the requests library. The main purpose of this wrapper is to always return a text output.
field aiosession: Optional[aiohttp.client.ClientSession] = None
field headers: Optional[Dict[str, str]] = None

async adelete(url: str, **kwargs: Any) → str
DELETE the URL and return the text asynchronously.

async aget(url: str, **kwargs: Any) → str
GET the URL and return the text asynchronously.

async apatch(url: str, data: Dict[str, Any], **kwargs: Any) → str
PATCH the URL and return the text asynchronously.

async apost(url: str, data: Dict[str, Any], **kwargs: Any) → str
POST to the URL and return the text asynchronously.

async aput(url: str, data: Dict[str, Any], **kwargs: Any) → str
PUT the URL and return the text asynchronously.

delete(url: str, **kwargs: Any) → str
DELETE the URL and return the text.

get(url: str, **kwargs: Any) → str
GET the URL and return the text.

patch(url: str, data: Dict[str, Any], **kwargs: Any) → str
PATCH the URL and return the text.

post(url: str, data: Dict[str, Any], **kwargs: Any) → str
POST to the URL and return the text.
put(url: str, data: Dict[str, Any], **kwargs: Any) → str
PUT the URL and return the text.

property requests: langchain.requests.Requests

pydantic model langchain.utilities.WikipediaAPIWrapper
Wrapper around the Wikipedia API. To use, you should have the wikipedia python package installed. This wrapper will use the Wikipedia API to conduct searches and fetch page summaries. By default, it will return the page summaries of the top-k results of an input search.

field lang: str = 'en'
field top_k_results: int = 3

fetch_formatted_page_summary(page: str) → Optional[str]

run(query: str) → str
Run a Wikipedia search and get page summaries.

pydantic model langchain.utilities.WolframAlphaAPIWrapper
Wrapper for Wolfram Alpha.

Docs for using:
- Go to Wolfram Alpha and sign up for a developer account
- Create an app and get your APP ID
- Save your APP ID into the WOLFRAM_ALPHA_APPID env variable
- pip install wolframalpha

field wolfram_alpha_appid: Optional[str] = None

run(query: str) → str
Run query through WolframAlpha and parse result.
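Example (a minimal sketch of the two wrappers above; the Wikipedia call needs the wikipedia package, and the Wolfram Alpha call assumes WOLFRAM_ALPHA_APPID is set):
from langchain.utilities import WikipediaAPIWrapper, WolframAlphaAPIWrapper
wiki = WikipediaAPIWrapper(top_k_results=1)
summary = wiki.run("Alan Turing")
wolfram = WolframAlphaAPIWrapper()
answer = wolfram.run("integrate x^2 from 0 to 1")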
Example Selector

Logic for selecting examples to include in prompts.

pydantic model langchain.prompts.example_selector.LengthBasedExampleSelector
Select examples based on length.

Validators: calculate_example_text_lengths » example_text_lengths

field example_prompt: langchain.prompts.prompt.PromptTemplate [Required]
Prompt template used to format the examples.

field examples: List[dict] [Required]
A list of the examples that the prompt template expects.

field get_text_length: Callable[[str], int] = <function _get_length_based>
Function to measure prompt length. Defaults to word count.

field max_length: int = 2048
Max length for the prompt, beyond which examples are cut.

add_example(example: Dict[str, str]) → None
Add a new example to the list.

select_examples(input_variables: Dict[str, str]) → List[dict]
Select which examples to use based on the input lengths.
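Example (a minimal sketch; the example dicts and template are illustrative):
from langchain.prompts import PromptTemplate
from langchain.prompts.example_selector import LengthBasedExampleSelector
example_prompt = PromptTemplate(
    input_variables=["input", "output"],
    template="Input: {input}\nOutput: {output}",
)
selector = LengthBasedExampleSelector(
    examples=[{"input": "happy", "output": "sad"}, {"input": "tall", "output": "short"}],
    example_prompt=example_prompt,
    max_length=25,  # word budget; examples beyond it are cut
)
chosen = selector.select_examples({"input": "big"})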
pydantic model langchain.prompts.example_selector.MaxMarginalRelevanceExampleSelector
ExampleSelector that selects examples based on Max Marginal Relevance. This was shown to improve performance in this paper: https://arxiv.org/pdf/2211.13892.pdf

field fetch_k: int = 20
Number of examples to fetch to rerank.

classmethod from_examples(examples: List[dict], embeddings: langchain.embeddings.base.Embeddings, vectorstore_cls: Type[langchain.vectorstores.base.VectorStore], k: int = 4, input_keys: Optional[List[str]] = None, fetch_k: int = 20, **vectorstore_cls_kwargs: Any) → langchain.prompts.example_selector.semantic_similarity.MaxMarginalRelevanceExampleSelector
Create a k-shot example selector using an example list and embeddings. Reshuffles examples dynamically based on query similarity.
Parameters:
  examples – List of examples to use in the prompt.
  embeddings – An initialized embedding API interface, e.g. OpenAIEmbeddings().
  vectorstore_cls – A vector store DB interface class, e.g. FAISS.
  k – Number of examples to select.
  input_keys – If provided, the search is based on the input variables instead of all variables.
  vectorstore_cls_kwargs – optional kwargs containing url for vector store
Returns: The ExampleSelector instantiated, backed by a vector store.

select_examples(input_variables: Dict[str, str]) → List[dict]
Select which examples to use based on semantic similarity.
pydantic model langchain.prompts.example_selector.SemanticSimilarityExampleSelector
Example selector that selects examples based on semantic similarity.

field example_keys: Optional[List[str]] = None
Optional keys to filter examples to.

field input_keys: Optional[List[str]] = None
Optional keys to filter input to. If provided, the search is based on the input variables instead of all variables.

field k: int = 4
Number of examples to select.

field vectorstore: langchain.vectorstores.base.VectorStore [Required]
VectorStore that contains information about examples.

add_example(example: Dict[str, str]) → str
Add a new example to the vectorstore.

classmethod from_examples(examples: List[dict], embeddings: langchain.embeddings.base.Embeddings, vectorstore_cls: Type[langchain.vectorstores.base.VectorStore], k: int = 4, input_keys: Optional[List[str]] = None, **vectorstore_cls_kwargs: Any) → langchain.prompts.example_selector.semantic_similarity.SemanticSimilarityExampleSelector
Create a k-shot example selector using an example list and embeddings. Reshuffles examples dynamically based on query similarity.
Parameters:
  examples – List of examples to use in the prompt.
  embeddings – An initialized embedding API interface, e.g. OpenAIEmbeddings().
  vectorstore_cls – A vector store DB interface class, e.g. FAISS.
  k – Number of examples to select.
  input_keys – If provided, the search is based on the input variables instead of all variables.
  vectorstore_cls_kwargs – optional kwargs containing url for vector store
Returns: The ExampleSelector instantiated, backed by a vector store.

select_examples(input_variables: Dict[str, str]) → List[dict]
Select which examples to use based on semantic similarity.
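Example (a minimal sketch; assumes the faiss package for the FAISS vector store and an OpenAI key for OpenAIEmbeddings):
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts.example_selector import SemanticSimilarityExampleSelector
from langchain.vectorstores import FAISS
selector = SemanticSimilarityExampleSelector.from_examples(
    examples=[{"input": "happy", "output": "sad"}, {"input": "tall", "output": "short"}],
    embeddings=OpenAIEmbeddings(),
    vectorstore_cls=FAISS,
    k=1,
)
chosen = selector.select_examples({"input": "enthusiastic"})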
Vector Stores

Wrappers on top of vector stores.

class langchain.vectorstores.AnalyticDB(connection_string: str, embedding_function: langchain.embeddings.base.Embeddings, collection_name: str = 'langchain', collection_metadata: Optional[dict] = None, pre_delete_collection: bool = False, logger: Optional[logging.Logger] = None)
VectorStore implementation using AnalyticDB. AnalyticDB is a distributed, cloud-native database with full PostgreSQL syntax.
- connection_string is a Postgres connection string.
- embedding_function is any embedding function implementing the langchain.embeddings.base.Embeddings interface.
- collection_name is the name of the collection to use (default: langchain). NOTE: This is not the name of the table, but the name of the collection. The tables will be created when initializing the store (if they do not exist), so make sure the user has the right permissions to create tables.
- pre_delete_collection: if True, will delete the collection if it exists (default: False). Useful for testing.

add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any) → List[str]
Run more texts through the embeddings and add to the vectorstore.
Parameters:
  texts – Iterable of strings to add to the vectorstore.
  metadatas – Optional list of metadatas associated with the texts.
  kwargs – vectorstore specific parameters
Returns: List of ids from adding the texts into the vectorstore.

connect() → sqlalchemy.engine.base.Connection

classmethod connection_string_from_db_params(driver: str, host: str, port: int, database: str, user: str, password: str) → str
Return connection string from database parameters.

create_collection() → None
create_tables_if_not_exists() → None
delete_collection() → None
drop_tables() → None

classmethod from_documents(documents: List[langchain.schema.Document], embedding: langchain.embeddings.base.Embeddings, collection_name: str = 'langchain', ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any) → langchain.vectorstores.analyticdb.AnalyticDB
Return VectorStore initialized from documents and embeddings. A Postgres connection string is required: either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable.
classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = 'langchain', ids: Optional[List[str]] = None, pre_delete_collection: bool = False, **kwargs: Any) → langchain.vectorstores.analyticdb.AnalyticDB
Return VectorStore initialized from texts and embeddings. A Postgres connection string is required: either pass it as a parameter or set the PGVECTOR_CONNECTION_STRING environment variable.

get_collection(session: sqlalchemy.orm.session.Session) → Optional[CollectionStore]

classmethod get_connection_string(kwargs: Dict[str, Any]) → str

similarity_search(query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any) → List[langchain.schema.Document]
Run similarity search with AnalyticDB with distance.
Parameters:
  query (str) – Query text to search for.
  k (int) – Number of results to return. Defaults to 4.
  filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None.
Returns: List of Documents most similar to the query.
similarity_search_by_vector(embedding: List[float], k: int = 4, filter: Optional[dict] = None, **kwargs: Any) → List[langchain.schema.Document]
Return docs most similar to embedding vector.
Parameters:
  embedding – Embedding to look up documents similar to.
  k – Number of Documents to return. Defaults to 4.
  filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None.
Returns: List of Documents most similar to the query vector.

similarity_search_with_score(query: str, k: int = 4, filter: Optional[dict] = None) → List[Tuple[langchain.schema.Document, float]]
Return docs most similar to query.
Parameters:
  query – Text to look up documents similar to.
  k – Number of Documents to return. Defaults to 4.
  filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None.
Returns: List of Documents most similar to the query and a score for each.

similarity_search_with_score_by_vector(embedding: List[float], k: int = 4, filter: Optional[dict] = None) → List[Tuple[langchain.schema.Document, float]]
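Example (a minimal sketch; the connection parameters are placeholders for a running AnalyticDB/Postgres instance, and OpenAIEmbeddings assumes an OpenAI API key):
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import AnalyticDB
conn_str = AnalyticDB.connection_string_from_db_params(
    driver="psycopg2",
    host="localhost",
    port=5432,
    database="postgres",
    user="postgres",
    password="PASSWORD",
)
db = AnalyticDB.from_texts(
    texts=["foo", "bar"],
    embedding=OpenAIEmbeddings(),
    connection_string=conn_str,
)
docs = db.similarity_search("foo", k=1)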
class langchain.vectorstores.Annoy(embedding_function: Callable, index: Any, metric: str, docstore: langchain.docstore.base.Docstore, index_to_docstore_id: Dict[int, str])
Wrapper around the Annoy vector database. To use, you should have the annoy python package installed.

Example
from langchain import Annoy
db = Annoy(embedding_function, index, docstore, index_to_docstore_id)

add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str]
Run more texts through the embeddings and add to the vectorstore.
Parameters:
  texts – Iterable of strings to add to the vectorstore.
  metadatas – Optional list of metadatas associated with the texts.
  kwargs – vectorstore specific parameters
Returns: List of ids from adding the texts into the vectorstore.

classmethod from_embeddings(text_embeddings: List[Tuple[str, List[float]]], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, metric: str = 'angular', trees: int = 100, n_jobs: int = -1, **kwargs: Any) → langchain.vectorstores.annoy.Annoy
Construct Annoy wrapper from embeddings.
Parameters:
  text_embeddings – List of tuples of (text, embedding)
  embedding – Embedding function to use.
  metadatas – List of metadata dictionaries to associate with documents.
  metric – Metric to use for indexing. Defaults to "angular".
  trees – Number of trees to use for indexing. Defaults to 100.
  n_jobs – Number of jobs to use for indexing. Defaults to -1.
This is a user-friendly interface that:
- Creates an in-memory docstore with the provided embeddings
- Initializes the Annoy database
This is intended to be a quick way to get started.

Example
from langchain import Annoy
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
db = Annoy.from_embeddings(text_embedding_pairs, embeddings)

classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, metric: str = 'angular', trees: int = 100, n_jobs: int = -1, **kwargs: Any) → langchain.vectorstores.annoy.Annoy
Construct Annoy wrapper from raw documents.
Parameters:
  texts – List of documents to index.
  embedding – Embedding function to use.
  metadatas – List of metadata dictionaries to associate with documents.
  metric – Metric to use for indexing. Defaults to "angular".
  trees – Number of trees to use for indexing. Defaults to 100.
  n_jobs – Number of jobs to use for indexing. Defaults to -1.
This is a user-friendly interface that:
- Embeds documents.
- Creates an in-memory docstore
- Initializes the Annoy database
This is intended to be a quick way to get started.

Example
from langchain import Annoy
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
index = Annoy.from_texts(texts, embeddings)

classmethod load_local(folder_path: str, embeddings: langchain.embeddings.base.Embeddings) → langchain.vectorstores.annoy.Annoy
Load Annoy index, docstore, and index_to_docstore_id from disk.
Parameters:
  folder_path – folder path to load index, docstore, and index_to_docstore_id from.
  embeddings – Embeddings to use when generating queries.

max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document]
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents.
Parameters:
  query – Text to look up documents similar to.
  k – Number of Documents to return. Defaults to 4.
  fetch_k – Number of Documents to fetch to pass to the MMR algorithm.
  lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
Returns: List of Documents selected by maximal marginal relevance.

max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document]
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents.
Parameters:
  embedding – Embedding to look up documents similar to.
  fetch_k – Number of Documents to fetch to pass to the MMR algorithm.
  k – Number of Documents to return. Defaults to 4.
  lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
Returns: List of Documents selected by maximal marginal relevance.

process_index_results(idxs: List[int], dists: List[float]) → List[Tuple[langchain.schema.Document, float]]
Turns Annoy results into a list of documents and scores.
Parameters:
  idxs – List of indices of the documents in the index.
  dists – List of distances of the documents in the index.
Returns: List of Documents and scores.

save_local(folder_path: str, prefault: bool = False) → None
Save Annoy index, docstore, and index_to_docstore_id to disk.
Parameters:
  folder_path – folder path to save index, docstore, and index_to_docstore_id to.
  prefault – Whether to pre-load the index into memory.

similarity_search(query: str, k: int = 4, search_k: int = -1, **kwargs: Any) → List[langchain.schema.Document]
Return docs most similar to query.
Parameters:
  query – Text to look up documents similar to.
  k – Number of Documents to return. Defaults to 4.
  search_k – inspect up to search_k nodes, which defaults to n_trees * n if not provided.
Returns: List of Documents most similar to the query.

similarity_search_by_index(docstore_index: int, k: int = 4, search_k: int = -1, **kwargs: Any) → List[langchain.schema.Document]
Return docs most similar to docstore_index.
Parameters:
  docstore_index – Index of document in docstore
  k – Number of Documents to return. Defaults to 4.
  search_k – inspect up to search_k nodes, which defaults to n_trees * n if not provided.
Returns: List of Documents most similar to the embedding.

similarity_search_by_vector(embedding: List[float], k: int = 4, search_k: int = -1, **kwargs: Any) → List[langchain.schema.Document]
Return docs most similar to embedding vector.
Parameters:
  embedding – Embedding to look up documents similar to.
  k – Number of Documents to return. Defaults to 4.
  search_k – inspect up to search_k nodes, which defaults to n_trees * n if not provided.
Returns: List of Documents most similar to the embedding.

similarity_search_with_score(query: str, k: int = 4, search_k: int = -1) → List[Tuple[langchain.schema.Document, float]]
Return docs most similar to query.
Parameters:
  query – Text to look up documents similar to.
  k – Number of Documents to return. Defaults to 4.
  search_k – inspect up to search_k nodes, which defaults to n_trees * n if not provided.
Returns: List of Documents most similar to the query and a score for each.

similarity_search_with_score_by_index(docstore_index: int, k: int = 4, search_k: int = -1) → List[Tuple[langchain.schema.Document, float]]
Return docs most similar to query.
Parameters:
  query – Text to look up documents similar to.
  k – Number of Documents to return. Defaults to 4.
  search_k – inspect up to search_k nodes, which defaults to n_trees * n if not provided.
Returns: List of Documents most similar to the query and a score for each.

similarity_search_with_score_by_vector(embedding: List[float], k: int = 4, search_k: int = -1) → List[Tuple[langchain.schema.Document, float]]
Return docs most similar to query.
Parameters:
  query – Text to look up documents similar to.
  k – Number of Documents to return. Defaults to 4.
  search_k – inspect up to search_k nodes, which defaults to n_trees * n if not provided.
Returns: List of Documents most similar to the query and a score for each.

class langchain.vectorstores.AtlasDB(name: str, embedding_function: Optional[langchain.embeddings.base.Embeddings] = None, api_key: Optional[str] = None, description: str = 'A description for your project', is_public: bool = True, reset_project_if_exists: bool = False)
Wrapper around Atlas: Nomic's neural database and rhizomatic instrument. To use, you should have the nomic python package installed.

Example
from langchain.vectorstores import AtlasDB
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = AtlasDB("my_project", embeddings.embed_query)
add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, refresh: bool = True, **kwargs: Any) → List[str]
Run more texts through the embeddings and add to the vectorstore.
Parameters:
  texts (Iterable[str]) – Texts to add to the vectorstore.
  metadatas (Optional[List[dict]], optional) – Optional list of metadatas.
  ids (Optional[List[str]]) – An optional list of ids.
  refresh (bool) – Whether or not to refresh indices with the updated data. Default True.
Returns: List of IDs of the added texts.
Return type: List[str]

create_index(**kwargs: Any) → Any
Creates an index in your project. See https://docs.nomic.ai/atlas_api.html#nomic.project.AtlasProject.create_index for full detail.
classmethod from_documents(documents: List[langchain.schema.Document], embedding: Optional[langchain.embeddings.base.Embeddings] = None, ids: Optional[List[str]] = None, name: Optional[str] = None, api_key: Optional[str] = None, persist_directory: Optional[str] = None, description: str = 'A description for your project', is_public: bool = True, reset_project_if_exists: bool = False, index_kwargs: Optional[dict] = None, **kwargs: Any) → langchain.vectorstores.atlas.AtlasDB
Create an AtlasDB vectorstore from a list of documents.
Parameters:
  name (str) – Name of the collection to create.
  api_key (str) – Your nomic API key.
  documents (List[Document]) – List of documents to add to the vectorstore.
  embedding (Optional[Embeddings]) – Embedding function. Defaults to None.
  ids (Optional[List[str]]) – Optional list of document IDs. If None, ids will be auto-created.
  description (str) – A description for your project.
  is_public (bool) – Whether your project is publicly accessible. True by default.
  reset_project_if_exists (bool) – Whether to reset this project if it already exists. Default False. Generally useful during development and testing.
  index_kwargs (Optional[dict]) – Dict of kwargs for index creation.
  See https://docs.nomic.ai/atlas_api.html
Returns: Nomic's neural database and finest rhizomatic instrument
Return type: AtlasDB

classmethod from_texts(texts: List[str], embedding: Optional[langchain.embeddings.base.Embeddings] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, name: Optional[str] = None, api_key: Optional[str] = None, description: str = 'A description for your project', is_public: bool = True, reset_project_if_exists: bool = False, index_kwargs: Optional[dict] = None, **kwargs: Any) → langchain.vectorstores.atlas.AtlasDB
Create an AtlasDB vectorstore from raw documents.
Parameters:
  texts (List[str]) – The list of texts to ingest.
  name (str) – Name of the project to create.
  api_key (str) – Your nomic API key.
  embedding (Optional[Embeddings]) – Embedding function. Defaults to None.
  metadatas (Optional[List[dict]]) – List of metadatas. Defaults to None.
  ids (Optional[List[str]]) – Optional list of document IDs. If None, ids will be auto-created.
  description (str) – A description for your project.
  is_public (bool) – Whether your project is publicly accessible. True by default.
  reset_project_if_exists (bool) – Whether to reset this project if it already exists. Default False. Generally useful during development and testing.
  index_kwargs (Optional[dict]) – Dict of kwargs for index creation. See https://docs.nomic.ai/atlas_api.html
Returns: Nomic's neural database and finest rhizomatic instrument
Return type: AtlasDB

similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document]
Run similarity search with AtlasDB.
Parameters:
  query (str) – Query text to search for.
  k (int) – Number of results to return. Defaults to 4.
Returns: List of documents most similar to the query text.
Return type: List[Document]

class langchain.vectorstores.Chroma(collection_name: str = 'langchain', embedding_function: Optional[Embeddings] = None, persist_directory: Optional[str] = None, client_settings: Optional[chromadb.config.Settings] = None, collection_metadata: Optional[Dict] = None, client: Optional[chromadb.Client] = None)
Wrapper around the ChromaDB embeddings platform. To use, you should have the chromadb python package installed.

Example
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = Chroma("langchain_store", embeddings.embed_query)

add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any) → List[str]
Run more texts through the embeddings and add to the vectorstore.
Parameters:
  texts (Iterable[str]) – Texts to add to the vectorstore.
  metadatas (Optional[List[dict]], optional) – Optional list of metadatas.
  ids (Optional[List[str]], optional) – Optional list of IDs.
Returns: List of IDs of the added texts.
Return type: List[str]

delete_collection() → None
Delete the collection.

classmethod from_documents(documents: List[Document], embedding: Optional[Embeddings] = None, ids: Optional[List[str]] = None, collection_name: str = 'langchain', persist_directory: Optional[str] = None, client_settings: Optional[chromadb.config.Settings] = None, client: Optional[chromadb.Client] = None, **kwargs: Any) → Chroma
Create a Chroma vectorstore from a list of documents. If a persist_directory is specified, the collection will be persisted there. Otherwise, the data will be ephemeral in-memory.
Parameters:
  collection_name (str) – Name of the collection to create.
  persist_directory (Optional[str]) – Directory to persist the collection.
  ids (Optional[List[str]]) – List of document IDs. Defaults to None.
  documents (List[Document]) – List of documents to add to the vectorstore.
  embedding (Optional[Embeddings]) – Embedding function. Defaults to None.
  client_settings (Optional[chromadb.config.Settings]) – Chroma client settings
Returns: Chroma vectorstore.
Return type: Chroma

classmethod from_texts(texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, collection_name: str = 'langchain', persist_directory: Optional[str] = None, client_settings: Optional[chromadb.config.Settings] = None, client: Optional[chromadb.Client] = None, **kwargs: Any) → Chroma
Create a Chroma vectorstore from raw documents. If a persist_directory is specified, the collection will be persisted there. Otherwise, the data will be ephemeral in-memory.
Parameters:
  texts (List[str]) – List of texts to add to the collection.
  collection_name (str) – Name of the collection to create.
  persist_directory (Optional[str]) – Directory to persist the collection.
  embedding (Optional[Embeddings]) – Embedding function. Defaults to None.
  metadatas (Optional[List[dict]]) – List of metadatas. Defaults to None.
  ids (Optional[List[str]]) – List of document IDs. Defaults to None.
  client_settings (Optional[chromadb.config.Settings]) – Chroma client settings
Returns: Chroma vectorstore.
Return type: Chroma

max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any) → List[langchain.schema.Document]
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents.
Parameters:
  query – Text to look up documents similar to.
  k – Number of Documents to return. Defaults to 4.
  fetch_k – Number of Documents to fetch to pass to the MMR algorithm.
  lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
  filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None.
Returns: List of Documents selected by maximal marginal relevance.
max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, filter: Optional[Dict[str, str]] = None, **kwargs: Any) → List[langchain.schema.Document]
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents.
Parameters:
  embedding – Embedding to look up documents similar to.
  k – Number of Documents to return. Defaults to 4.
  fetch_k – Number of Documents to fetch to pass to the MMR algorithm.
  lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
  filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None.
Returns: List of Documents selected by maximal marginal relevance.

persist() → None
Persist the collection. This can be used to explicitly persist the data to disk. It will also be called automatically when the object is destroyed.

similarity_search(query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any) → List[langchain.schema.Document]
Run similarity search with Chroma.
Parameters:
  query (str) – Query text to search for.
  k (int) – Number of results to return. Defaults to 4.
  filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None.
Returns: List of documents most similar to the query text.
Return type: List[Document]

similarity_search_by_vector(embedding: List[float], k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any) → List[langchain.schema.Document]
Return docs most similar to embedding vector.
Parameters:
  embedding – Embedding to look up documents similar to.
  k – Number of Documents to return. Defaults to 4.
Returns: List of Documents most similar to the query vector.

similarity_search_with_score(query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]]
Run similarity search with Chroma with distance.
Parameters:
  query (str) – Query text to search for.
  k (int) – Number of results to return. Defaults to 4.
  filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None.
Returns: List of documents most similar to the query text, with distance in float.
Return type: List[Tuple[Document, float]]
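Example (a minimal sketch; persist_directory is a placeholder path, and OpenAIEmbeddings assumes an OpenAI API key):
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
db = Chroma.from_texts(
    texts=["foo", "bar"],
    embedding=OpenAIEmbeddings(),
    persist_directory="./chroma_db",  # omit for an ephemeral in-memory store
)
docs_and_scores = db.similarity_search_with_score("foo", k=1)
db.persist()  # explicitly flush to disk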
update_document(document_id: str, document: langchain.schema.Document) → None
Update a document in the collection.
Parameters:
  document_id (str) – ID of the document to update.
  document (Document) – Document to update.

class langchain.vectorstores.DeepLake(dataset_path: str = './deeplake/', token: Optional[str] = None, embedding_function: Optional[langchain.embeddings.base.Embeddings] = None, read_only: Optional[bool] = False, ingestion_batch_size: int = 1024, num_workers: int = 0, **kwargs: Any)
Wrapper around Deep Lake, a data lake for deep learning applications. We implement naive similarity search and filtering for fast prototyping, but it can be extended with Tensor Query Language (TQL) for production use cases over billions of rows.
Why Deep Lake?
- Not only stores embeddings, but also the original data with version control.
- Serverless; doesn't require another service and can be used with major cloud providers (S3, GCS, etc.)
- More than just a multi-modal vector store. You can use the dataset to fine-tune your own LLM models.
To use, you should have the deeplake python package installed.

Example
from langchain.vectorstores import DeepLake
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = DeepLake("langchain_store", embeddings.embed_query)

add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any) → List[str]
Run more texts through the embeddings and add to the vectorstore.
Parameters:
  texts (Iterable[str]) – Texts to add to the vectorstore.
  metadatas (Optional[List[dict]], optional) – Optional list of metadatas.
  ids (Optional[List[str]], optional) – Optional list of IDs.
Returns: List of IDs of the added texts.
Return type: List[str]

delete(ids: Any[List[str], None] = None, filter: Any[Dict[str, str], None] = None, delete_all: Any[bool, None] = None) → bool
Delete the entities in the dataset.
Parameters:
  ids (Optional[List[str]], optional) – The document_ids to delete. Defaults to None.
  filter (Optional[Dict[str, str]], optional) – The filter to delete by. Defaults to None.
  delete_all (Optional[bool], optional) – Whether to drop the dataset. Defaults to None.

delete_dataset() → None
Delete the collection.
classmethod force_delete_by_path(path: str) → None[source]# Force delete a dataset by path. classmethod from_texts(texts: List[str], embedding: Optional[langchain.embeddings.base.Embeddings] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, dataset_path: str = './deeplake/', **kwargs: Any) → langchain.vectorstores.deeplake.DeepLake[source]# Create a Deep Lake dataset from raw documents. If a dataset_path is specified, the dataset will be persisted in that location; otherwise it defaults to ./deeplake. Parameters path (str, pathlib.Path) – The full path to the dataset. Can be: a Deep Lake cloud path of the form hub://username/dataset_name (to write to Deep Lake cloud datasets, ensure that you are logged in to Deep Lake; use 'activeloop login' from the command line); an AWS S3 path of the form s3://bucketname/path/to/dataset (credentials are required in the environment); a Google Cloud Storage path of the form gcs://bucketname/path/to/dataset (credentials are required in the environment); a local file system path of the form ./path/to/dataset, ~/path/to/dataset or path/to/dataset.
An in-memory path of the form mem://path/to/dataset doesn't save the dataset but keeps it in memory instead; it should be used only for testing as it does not persist. documents (List[Document]) – List of documents to add. embedding (Optional[Embeddings]) – Embedding function. Defaults to None. metadatas (Optional[List[dict]]) – List of metadatas. Defaults to None. ids (Optional[List[str]]) – List of document IDs. Defaults to None. Returns Deep Lake dataset. Return type DeepLake max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. fetch_k – Number of Documents to fetch to pass to MMR algorithm. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance.
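As a sketch of from_texts with a local dataset path (the texts are hypothetical; hub://, s3://, gcs:// or mem:// paths work the same way):

```python
from langchain.vectorstores import DeepLake
from langchain.embeddings.openai import OpenAIEmbeddings

# Persisted to ./deeplake/ by default; pass another path to relocate it.
db = DeepLake.from_texts(
    ["deep lake persists data", "vectors enable similarity search"],
    embedding=OpenAIEmbeddings(),
    dataset_path="./deeplake/",
)
```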
max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Parameters embedding – Embedding to look up documents similar to. k – Number of Documents to return. Defaults to 4. fetch_k – Number of Documents to fetch to pass to MMR algorithm. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance. persist() → None[source]# Persist the collection.
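A short sketch of an MMR query, assuming the db built above:

```python
# Fetch 20 candidates, then keep the 4 that best balance relevance
# and diversity (lambda_mult=1.0 would be pure relevance).
docs = db.max_marginal_relevance_search(
    "similarity search", k=4, fetch_k=20, lambda_mult=0.5
)
```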
search(query: Optional[str] = None, embedding: Optional[List[float]] = None, k: int = 4, distance_metric: str = 'L2', use_maximal_marginal_relevance: Optional[bool] = False, fetch_k: Optional[int] = 20, filter: Optional[Union[Dict[str, str], Callable, str]] = None, return_score: Optional[bool] = False, **kwargs: Any) → Union[List[Document], List[Tuple[Document, float]]][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. embedding – Embedding function to use. Defaults to None. k – Number of Documents to return. Defaults to 4. distance_metric – 'L2' for Euclidean, 'L1' for Nuclear, 'max' for L-infinity distance, 'cos' for cosine similarity, 'dot' for dot product. Defaults to 'L2'. filter – Attribute filter by metadata, e.g. {'key': 'value'}; it can also take a Deep Lake filter (see https://docs.deeplake.ai/en/latest/deeplake.core.dataset.html#deeplake.core.dataset.Dataset.filter). Defaults to None. use_maximal_marginal_relevance – Whether to use maximal marginal relevance. Defaults to False. fetch_k – Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20. return_score – Whether to return the score. Defaults to False. Returns List of Documents selected by the specified distance metric; if return_score is True, a list of (Document, score) tuples instead. similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to query. Parameters query – Text to embed and run the query on. k – Number of Documents to return. Defaults to 4. Additional keyword arguments: embedding – Embedding function to use. Defaults to None. distance_metric – 'L2' for Euclidean, 'L1' for Nuclear, 'max' for L-infinity distance, 'cos' for cosine similarity, 'dot' for dot product. Defaults to 'L2'. filter – Attribute filter by metadata, e.g. {'key': 'value'}. Defaults to None. use_maximal_marginal_relevance – Whether to use maximal marginal relevance. Defaults to False. fetch_k – Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. return_score – Whether to return the score. Defaults to False. Returns List of Documents most similar to the query vector.
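For illustration, a sketch of search with an explicit distance metric and scores, again assuming the db above:

```python
# Cosine distance metric; return_score=True yields (Document, score) pairs.
results = db.search(
    query="how is data persisted?",
    distance_metric="cos",
    k=4,
    return_score=True,
)
for doc, score in results:
    print(round(score, 3), doc.page_content)
```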
similarity_search_by_vector(embedding: List[float], k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to embedding vector. Parameters embedding – Embedding to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query vector. similarity_search_with_score(query: str, distance_metric: str = 'L2', k: int = 4, filter: Optional[Dict[str, str]] = None) → List[Tuple[langchain.schema.Document, float]][source]# Run similarity search with Deep Lake with distance returned. Parameters query (str) – Query text to search for. distance_metric – 'L2' for Euclidean, 'L1' for Nuclear, 'max' for L-infinity distance, 'cos' for cosine similarity, 'dot' for dot product. Defaults to 'L2'. k (int) – Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]) – Filter by metadata. Defaults to None. Returns List of documents most similar to the query text, with distance in float. Return type List[Tuple[Document, float]] class langchain.vectorstores.ElasticVectorSearch(elasticsearch_url: str, index_name: str, embedding: langchain.embeddings.base.Embeddings)[source]#
Wrapper around Elasticsearch as a vector database. To connect to an Elasticsearch instance that does not require login credentials, pass the Elasticsearch URL and index name along with the embedding object to the constructor. Example from langchain import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() elastic_vector_search = ElasticVectorSearch( elasticsearch_url="http://localhost:9200", index_name="test_index", embedding=embedding ) To connect to an Elasticsearch instance that requires login credentials, including Elastic Cloud, use the Elasticsearch URL format https://username:password@es_host:9243. For example, to connect to Elastic Cloud, create the Elasticsearch URL with the required authentication details and pass it to the ElasticVectorSearch constructor as the named parameter elasticsearch_url. You can obtain your Elastic Cloud URL and login credentials by logging in to the Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and navigating to the “Deployments” page. To obtain your Elastic Cloud password for the default “elastic” user: 1. Log in to the Elastic Cloud console at https://cloud.elastic.co. 2. Go to “Security” > “Users”. 3. Locate the “elastic” user and click “Edit”. 4. Click “Reset password”. 5. Follow the prompts to reset the password. The format for Elastic Cloud URLs is https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243. Example from langchain import ElasticVectorSearch
from langchain.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() elastic_host = "cluster_id.region_id.gcp.cloud.es.io" elasticsearch_url = f"https://username:password@{elastic_host}:9243" elastic_vector_search = ElasticVectorSearch( elasticsearch_url=elasticsearch_url, index_name="test_index", embedding=embedding ) Parameters elasticsearch_url (str) – The URL for the Elasticsearch instance. index_name (str) – The name of the Elasticsearch index for the embeddings. embedding (Embeddings) – An object that provides the ability to embed text. It should be an instance of a class that subclasses the Embeddings abstract base class, such as OpenAIEmbeddings(). Raises ValueError – If the elasticsearch python package is not installed. add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, refresh_indices: bool = True, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts – Iterable of strings to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. refresh_indices – Whether to refresh the Elasticsearch indices. Returns List of ids from adding the texts into the vectorstore.
classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any) → langchain.vectorstores.elastic_vector_search.ElasticVectorSearch[source]# Construct ElasticVectorSearch wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new index for the embeddings in the Elasticsearch instance. 3. Adds the documents to the newly created Elasticsearch index. This is intended to be a quick way to get started. Example from langchain import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() elastic_vector_search = ElasticVectorSearch.from_texts( texts, embeddings, elasticsearch_url="http://localhost:9200" ) similarity_search(query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query.
similarity_search_with_score(query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query, with a score for each. class langchain.vectorstores.FAISS(embedding_function: typing.Callable, index: typing.Any, docstore: langchain.docstore.base.Docstore, index_to_docstore_id: typing.Dict[int, str], relevance_score_fn: typing.Optional[typing.Callable[[float], float]] = <function _default_relevance_score_fn>)[source]# Wrapper around FAISS vector database. To use, you should have the faiss python package installed. Example from langchain import FAISS faiss = FAISS(embedding_function, index, docstore, index_to_docstore_id) add_embeddings(text_embeddings: Iterable[Tuple[str, List[float]]], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters
text_embeddings – Iterable pairs of string and embedding to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. Returns List of ids from adding the texts into the vectorstore. add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts – Iterable of strings to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. Returns List of ids from adding the texts into the vectorstore. classmethod from_embeddings(text_embeddings: List[Tuple[str, List[float]]], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any) → langchain.vectorstores.faiss.FAISS[source]# Construct FAISS wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates an in-memory docstore. 3. Initializes the FAISS database. This is intended to be a quick way to get started. Example from langchain import FAISS from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts) text_embedding_pairs = list(zip(texts, text_embeddings))
faiss = FAISS.from_embeddings(text_embedding_pairs, embeddings) classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any) → langchain.vectorstores.faiss.FAISS[source]# Construct FAISS wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates an in-memory docstore. 3. Initializes the FAISS database. This is intended to be a quick way to get started. Example from langchain import FAISS from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() faiss = FAISS.from_texts(texts, embeddings) classmethod load_local(folder_path: str, embeddings: langchain.embeddings.base.Embeddings, index_name: str = 'index') → langchain.vectorstores.faiss.FAISS[source]# Load FAISS index, docstore, and index_to_docstore_id from disk. Parameters folder_path – folder path to load index, docstore, and index_to_docstore_id from. embeddings – Embeddings to use when generating queries. index_name – for loading with a specific index file name.
max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. fetch_k – Number of Documents to fetch to pass to MMR algorithm. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance. max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Parameters embedding – Embedding to look up documents similar to. k – Number of Documents to return. Defaults to 4. fetch_k – Number of Documents to fetch to pass to MMR algorithm. lambda_mult – Number between 0 and 1 that determines the degree
of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance. merge_from(target: langchain.vectorstores.faiss.FAISS) → None[source]# Merge another FAISS object with the current one. Add the target FAISS to the current one. Parameters target – FAISS object you wish to merge into the current one. Returns None. save_local(folder_path: str, index_name: str = 'index') → None[source]# Save FAISS index, docstore, and index_to_docstore_id to disk. Parameters folder_path – folder path to save index, docstore, and index_to_docstore_id to. index_name – for saving with a specific index file name. similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query. similarity_search_by_vector(embedding: List[float], k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to embedding vector. Parameters embedding – Embedding to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the embedding.
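A minimal sketch tying merge_from, save_local and load_local together (the texts are hypothetical):

```python
from langchain.vectorstores import FAISS
from langchain.embeddings.openai import OpenAIEmbeddings

embeddings = OpenAIEmbeddings()
db1 = FAISS.from_texts(["hello world"], embeddings)
db2 = FAISS.from_texts(["goodbye world"], embeddings)

db1.merge_from(db2)            # db1 now holds both documents

db1.save_local("faiss_index")  # writes the index and docstore to a folder
restored = FAISS.load_local("faiss_index", embeddings)
docs = restored.similarity_search("hello", k=1)
```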
similarity_search_with_score(query: str, k: int = 4) → List[Tuple[langchain.schema.Document, float]][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query and score for each. similarity_search_with_score_by_vector(embedding: List[float], k: int = 4) → List[Tuple[langchain.schema.Document, float]][source]# Return docs most similar to the embedding vector. Parameters embedding – Embedding to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query and score for each. class langchain.vectorstores.Milvus(embedding_function: Embeddings, collection_name: str = 'LangChainCollection', connection_args: Optional[dict[str, Any]] = None, consistency_level: str = 'Session', index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: Optional[bool] = False)[source]# Wrapper around the Milvus vector database.
add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, timeout: Optional[int] = None, batch_size: int = 1000, **kwargs: Any) → List[str][source]# Insert text data into Milvus. Inserting data when the collection has not been made yet will result in creating a new collection. The data of the first entity decides the schema of the new collection: the dim is extracted from the first embedding, and the columns are decided by the first metadata dict. Metadata keys will need to be present for all inserted values, as at the moment there is no None equivalent in Milvus. Parameters texts (Iterable[str]) – The texts to embed; it is assumed that they all fit in memory. metadatas (Optional[List[dict]]) – Metadata dicts attached to each of the texts. Defaults to None. timeout (Optional[int]) – Timeout for each batch insert. Defaults to None. batch_size (int, optional) – Batch size to use for insertion. Defaults to 1000. Raises MilvusException – Failure to add texts. Returns The resulting keys for each inserted element. Return type List[str]
classmethod from_texts(texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = 'LangChainCollection', connection_args: dict[str, Any] = {'host': 'localhost', 'password': '', 'port': '19530', 'secure': False, 'user': ''}, consistency_level: str = 'Session', index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: bool = False, **kwargs: Any) → Milvus[source]# Creates a Milvus collection, indexes it with HNSW, and inserts data. Parameters texts (List[str]) – Text data. embedding (Embeddings) – Embedding function. metadatas (Optional[List[dict]]) – Metadata for each text if it exists. Defaults to None. collection_name (str, optional) – Collection name to use. Defaults to “LangChainCollection”. connection_args (dict[str, Any], optional) – Connection args to use. Defaults to DEFAULT_MILVUS_CONNECTION. consistency_level (str, optional) – Which consistency level to use. Defaults to “Session”. index_params (Optional[dict], optional) – Which index_params to use. Defaults
to None. search_params (Optional[dict], optional) – Which search params to use. Defaults to None. drop_old (Optional[bool], optional) – Whether to drop the collection with that name if it exists. Defaults to False. Returns Milvus vector store. Return type Milvus max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Perform a search and return results that are reordered by MMR. Parameters query (str) – The text being searched. k (int, optional) – How many results to give. Defaults to 4. fetch_k (int, optional) – Total results to select k from. Defaults to 20. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. param (dict, optional) – The search params for the specified index. Defaults to None. expr (str, optional) – Filtering expression. Defaults to None. timeout (int, optional) – How long to wait before timeout error. Defaults to None. kwargs – Collection.search() keyword arguments. Returns Document results for search. Return type List[Document]
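A minimal sketch of from_texts, assuming a Milvus server reachable at localhost:19530 (the texts are hypothetical):

```python
from langchain.vectorstores import Milvus
from langchain.embeddings.openai import OpenAIEmbeddings

vector_db = Milvus.from_texts(
    ["milvus stores vectors", "langchain wraps vector stores"],
    OpenAIEmbeddings(),
    connection_args={"host": "localhost", "port": "19530"},
)
docs = vector_db.similarity_search("what stores vectors?", k=1)
```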
max_marginal_relevance_search_by_vector(embedding: list[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any) → List[Document][source]# Perform a search and return results that are reordered by MMR. Parameters embedding (list[float]) – The embedding vector being searched. k (int, optional) – How many results to give. Defaults to 4. fetch_k (int, optional) – Total results to select k from. Defaults to 20. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. param (dict, optional) – The search params for the specified index. Defaults to None. expr (str, optional) – Filtering expression. Defaults to None. timeout (int, optional) – How long to wait before timeout error. Defaults to None. kwargs – Collection.search() keyword arguments. Returns Document results for search. Return type List[Document]
similarity_search(query: str, k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Perform a similarity search against the query string. Parameters query (str) – The text to search. k (int, optional) – How many results to return. Defaults to 4. param (dict, optional) – The search params for the index type. Defaults to None. expr (str, optional) – Filtering expression. Defaults to None. timeout (int, optional) – How long to wait before timeout error. Defaults to None. kwargs – Collection.search() keyword arguments. Returns Document results for search. Return type List[Document] similarity_search_by_vector(embedding: List[float], k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Perform a similarity search against the query vector. Parameters embedding (List[float]) – The embedding vector to search. k (int, optional) – How many results to return. Defaults to 4. param (dict, optional) – The search params for the index type.
Defaults to None. expr (str, optional) – Filtering expression. Defaults to None. timeout (int, optional) – How long to wait before timeout error. Defaults to None. kwargs – Collection.search() keyword arguments. Returns Document results for search. Return type List[Document] similarity_search_with_score(query: str, k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]# Perform a search on a query string and return results with score. For more information about the search parameters, take a look at the pymilvus documentation found here: https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md Parameters query (str) – The text being searched. k (int, optional) – The number of results to return. Defaults to 4. param (dict) – The search params for the specified index. Defaults to None. expr (str, optional) – Filtering expression. Defaults to None. timeout (int, optional) – How long to wait before timeout error. Defaults to None. kwargs – Collection.search() keyword arguments. Return type List[Tuple[Document, float]]
similarity_search_with_score_by_vector(embedding: List[float], k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]# Perform a search on an embedding vector and return results with score. For more information about the search parameters, take a look at the pymilvus documentation found here: https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md Parameters embedding (List[float]) – The embedding vector being searched. k (int, optional) – The number of results to return. Defaults to 4. param (dict) – The search params for the specified index. Defaults to None. expr (str, optional) – Filtering expression. Defaults to None. timeout (int, optional) – How long to wait before timeout error. Defaults to None. kwargs – Collection.search() keyword arguments. Returns Result doc and score. Return type List[Tuple[Document, float]] class langchain.vectorstores.MyScale(embedding: langchain.embeddings.base.Embeddings, config: Optional[langchain.vectorstores.myscale.MyScaleSettings] = None, **kwargs: Any)[source]# Wrapper around MyScale vector database.
You need a clickhouse-connect python package and a valid account to connect to MyScale. MyScale can not only search with simple vector indexes; it also supports complex queries with multiple conditions, constraints, and even sub-queries. For more information, please visit the [MyScale official site](https://docs.myscale.com/en/overview/). add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, batch_size: int = 32, ids: Optional[Iterable[str]] = None, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts – Iterable of strings to add to the vectorstore. ids – Optional list of ids to associate with the texts. batch_size – Batch size of insertion. metadatas – Optional column data to be inserted. Returns List of ids from adding the texts into the vectorstore. drop() → None[source]# Helper function: drop data.
escape_str(value: str) → str[source]# classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[Dict[Any, Any]]] = None, config: Optional[langchain.vectorstores.myscale.MyScaleSettings] = None, text_ids: Optional[Iterable[str]] = None, batch_size: int = 32, **kwargs: Any) → langchain.vectorstores.myscale.MyScale[source]# Create MyScale wrapper from existing texts. Parameters embedding (Embeddings) – Function to extract text embeddings. texts (Iterable[str]) – List or tuple of strings to be added. config (MyScaleSettings, optional) – MyScale configuration. text_ids (Optional[Iterable], optional) – IDs for the texts. Defaults to None. batch_size (int, optional) – Batch size when transmitting data to MyScale. Defaults to 32. metadatas (List[dict], optional) – Metadata attached to the texts. Defaults to None. Other keyword arguments will be passed into [clickhouse-connect](https://clickhouse.com/docs/en/integrations/python#clickhouse-connect-driver-api). Returns MyScale index. property metadata_column: str#
similarity_search(query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Perform a similarity search with MyScale. Parameters query (str) – query string. k (int, optional) – Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional) – where condition string. Defaults to None. NOTE – Do not let end users fill this in, and always be aware of SQL injection. When dealing with metadatas, remember to use {self.metadata_column}.attribute instead of attribute alone. The default name for it is metadata. Returns List of Documents. Return type List[Document] similarity_search_by_vector(embedding: List[float], k: int = 4, where_str: Optional[str] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Perform a similarity search with MyScale by vectors. Parameters embedding (List[float]) – embedding vector to look up. k (int, optional) – Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional) – where condition string. Defaults to None. NOTE – Do not let end users fill this in, and always be aware of SQL injection. When dealing with metadatas, remember to use {self.metadata_column}.attribute instead of attribute
alone. The default name for it is metadata. Returns List of (Document, similarity). Return type List[Document] similarity_search_with_relevance_scores(query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]# Perform a similarity search with MyScale. Parameters query (str) – query string. k (int, optional) – Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional) – where condition string. Defaults to None. NOTE – Do not let end users fill this in, and always be aware of SQL injection. When dealing with metadatas, remember to use {self.metadata_column}.attribute instead of attribute alone. The default name for it is metadata. Returns List of documents. Return type List[Tuple[Document, float]]
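As a sketch of the where_str mechanics, assuming an existing MyScale store named vectorstore with the default column map (the metadata attribute is hypothetical, and the condition must never be built from raw end-user input):

```python
# Metadata lives in the `metadata` column by default, so conditions
# reference attributes as metadata.<attribute>.
where = "metadata.source = 'docs'"
docs = vectorstore.similarity_search("query text", k=4, where_str=where)
```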
pydantic settings langchain.vectorstores.MyScaleSettings[source]# MyScale client configuration. Attributes: host (str) – URL to connect to the MyScale backend. Defaults to 'localhost'. port (int) – URL port to connect with HTTP. Defaults to 8443. username (str) – Username to log in. Defaults to None. password (str) – Password to log in. Defaults to None. index_type (str) – Index type string. index_param (dict) – Index build parameters. database (str) – Database name to find the table. Defaults to 'default'. table (str) – Table name to operate on. Defaults to 'vector_table'. metric (str) – Metric to compute distance; supported are 'l2', 'cosine' and 'ip'. Defaults to 'cosine'. column_map (Dict) – Column type map to project column names onto langchain semantics. Must have the keys text, id and vector, and must be the same size as the number of columns. For example: {'id': 'text_id', 'vector': 'text_embedding', 'text': 'text_plain', 'metadata': 'metadata_dictionary_in_json'}. Defaults to the identity map.
Config: env_file: str = .env; env_file_encoding: str = utf-8; env_prefix: str = myscale_. Fields: column_map (Dict[str, str]), database (str), host (str), index_param (Optional[Dict[str, str]]), index_type (str), metric (str), password (Optional[str]), port (int), table (str), username (Optional[str]). field column_map: Dict[str, str] = {'id': 'id', 'metadata': 'metadata', 'text': 'text', 'vector': 'vector'}# field database: str = 'default'# field host: str = 'localhost'# field index_param: Optional[Dict[str, str]] = None#
field index_type: str = 'IVFFLAT'# field metric: str = 'cosine'# field password: Optional[str] = None# field port: int = 8443# field table: str = 'langchain'# field username: Optional[str] = None#
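For illustration, a sketch of supplying these settings programmatically (the host and credentials are hypothetical; each field can equally come from a myscale_-prefixed environment variable):

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import MyScale
from langchain.vectorstores.myscale import MyScaleSettings

# Unset fields fall back to their defaults or to the environment.
config = MyScaleSettings(
    host="msc-example.aws.myscale.com",  # hypothetical host
    port=443,
    username="demo",
    password="demo_password",
    metric="cosine",
)
vectorstore = MyScale(embedding=OpenAIEmbeddings(), config=config)
```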
class langchain.vectorstores.OpenSearchVectorSearch(opensearch_url: str, index_name: str, embedding_function: langchain.embeddings.base.Embeddings, **kwargs: Any)[source]# Wrapper around OpenSearch as a vector database. Example from langchain import OpenSearchVectorSearch opensearch_vector_search = OpenSearchVectorSearch( "http://localhost:9200", "embeddings", embedding_function ) add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, bulk_size: int = 500, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts – Iterable of strings to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. bulk_size – Bulk API request count; default: 500. Returns List of ids from adding the texts into the vectorstore. Optional Args: vector_field: Document field embeddings are stored in. Defaults to “vector_field”. text_field: Document field the text of the document is stored in. Defaults to “text”. classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, bulk_size: int = 500, **kwargs: Any) → langchain.vectorstores.opensearch_vector_search.OpenSearchVectorSearch[source]# Construct OpenSearchVectorSearch wrapper from raw documents. Example from langchain import OpenSearchVectorSearch from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() opensearch_vector_search = OpenSearchVectorSearch.from_texts( texts, embeddings, opensearch_url="http://localhost:9200" ) OpenSearch by default supports Approximate Search powered by the nmslib, faiss and lucene engines, which is recommended for large datasets. It also supports brute force search through Script Scoring and Painless Scripting. Optional Args: vector_field: Document field embeddings are stored in. Defaults to “vector_field”. text_field: Document field the text of the document is stored in. Defaults to “text”. Optional Keyword Args for Approximate Search: engine: “nmslib”, “faiss”, “lucene”; default: “nmslib”. space_type: “l2”, “l1”, “cosinesimil”, “linf”, “innerproduct”; default: “l2”. ef_search: Size of the dynamic list used during k-NN searches. Higher values
lead to more accurate but slower searches; default: 512. ef_construction: Size of the dynamic list used during k-NN graph creation. Higher values lead to a more accurate graph but slower indexing speed; default: 512. m: Number of bidirectional links created for each new element. Large impact on memory consumption. Between 2 and 100; default: 16. Keyword Args for Script Scoring or Painless Scripting: is_appx_search: False. similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to query. By default supports Approximate Search. Also supports Script Scoring and Painless Scripting. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. Returns List of Documents most similar to the query. Optional Args: vector_field: Document field embeddings are stored in. Defaults to “vector_field”. text_field: Document field the text of the document is stored in. Defaults to “text”. metadata_field: Document field that metadata is stored in. Defaults to “metadata”. Can be set to a special value “*” to include the entire document. Optional Args for Approximate Search: search_type: “approximate_search”; default: “approximate_search”. size: number of results the query actually returns; default: 4.
boolean_filter: A Boolean filter consists of a Boolean query that contains a k-NN query and a filter. subquery_clause: Query clause on the knn vector field; default: “must”. lucene_filter: the Lucene algorithm decides whether to perform an exact k-NN search with pre-filtering or an approximate search with modified post-filtering. Optional Args for Script Scoring Search: search_type: “script_scoring”; default: “approximate_search”. space_type: “l2”, “l1”, “linf”, “cosinesimil”, “innerproduct”, “hammingbit”; default: “l2”. pre_filter: script_score query to pre-filter documents before identifying nearest neighbors; default: {“match_all”: {}}. Optional Args for Painless Scripting Search: search_type: “painless_scripting”; default: “approximate_search”. space_type: “l2Squared”, “l1Norm”, “cosineSimilarity”; default: “l2Squared”. pre_filter: script_score query to pre-filter documents before identifying nearest neighbors; default: {“match_all”: {}}.
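A sketch of how those keyword args might be combined, assuming OpenSearch at localhost:9200 (the corpus is hypothetical, and which arguments apply depends on your index setup):

```python
from langchain.vectorstores import OpenSearchVectorSearch
from langchain.embeddings.openai import OpenAIEmbeddings

# engine/space_type are the approximate-search kwargs described above.
docsearch = OpenSearchVectorSearch.from_texts(
    ["alpha", "beta", "gamma"],
    OpenAIEmbeddings(),
    opensearch_url="http://localhost:9200",
    engine="faiss",
    space_type="cosinesimil",
)
docs = docsearch.similarity_search("alpha", k=2)
```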
class langchain.vectorstores.Pinecone(index: Any, embedding_function: Callable, text_key: str, namespace: Optional[str] = None)[source]# Wrapper around Pinecone vector database. To use, you should have the pinecone-client python package installed. Example from langchain.vectorstores import Pinecone from langchain.embeddings.openai import OpenAIEmbeddings import pinecone # The environment should be the one specified next to the API key # in your Pinecone console pinecone.init(api_key="***", environment="...") index = pinecone.Index("langchain-demo") embeddings = OpenAIEmbeddings() vectorstore = Pinecone(index, embeddings.embed_query, "text") add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, namespace: Optional[str] = None, batch_size: int = 32, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts – Iterable of strings to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. ids – Optional list of ids to associate with the texts. namespace – Optional pinecone namespace to add the texts to. Returns List of ids from adding the texts into the vectorstore. classmethod from_existing_index(index_name: str, embedding: langchain.embeddings.base.Embeddings, text_key: str = 'text', namespace: Optional[str] = None) → langchain.vectorstores.pinecone.Pinecone[source]# Load pinecone vectorstore from index name.
classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, batch_size: int = 32, text_key: str = 'text', index_name: Optional[str] = None, namespace: Optional[str] = None, **kwargs: Any) → langchain.vectorstores.pinecone.Pinecone[source]# Construct Pinecone wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided Pinecone index. This is intended to be a quick way to get started. Example from langchain import Pinecone from langchain.embeddings import OpenAIEmbeddings import pinecone # The environment should be the one specified next to the API key # in your Pinecone console pinecone.init(api_key="***", environment="...") embeddings = OpenAIEmbeddings() pinecone = Pinecone.from_texts( texts, embeddings, index_name="langchain-demo" ) similarity_search(query: str, k: int = 4, filter: Optional[dict] = None, namespace: Optional[str] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Return pinecone documents most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4.
filter – Dictionary of argument(s) to filter on metadata. namespace – Namespace to search in. Default will search in the '' namespace. Returns List of Documents most similar to the query. similarity_search_with_score(query: str, k: int = 4, filter: Optional[dict] = None, namespace: Optional[str] = None) → List[Tuple[langchain.schema.Document, float]][source]# Return pinecone documents most similar to query, along with scores. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. filter – Dictionary of argument(s) to filter on metadata. namespace – Namespace to search in. Default will search in the '' namespace. Returns List of Documents most similar to the query and score for each. class langchain.vectorstores.Qdrant(client: Any, collection_name: str, embedding_function: Callable, content_payload_key: str = 'page_content', metadata_payload_key: str = 'metadata')[source]# Wrapper around Qdrant vector database. To use you should have the qdrant-client package installed. Example from qdrant_client import QdrantClient from langchain import Qdrant client = QdrantClient() collection_name = "MyCollection" qdrant = Qdrant(client, collection_name, embedding_function) CONTENT_KEY = 'page_content'# METADATA_KEY = 'metadata'#
add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts – Iterable of strings to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. Returns List of ids from adding the texts into the vectorstore. classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, location: Optional[str] = None, url: Optional[str] = None, port: Optional[int] = 6333, grpc_port: int = 6334, prefer_grpc: bool = False, https: Optional[bool] = None, api_key: Optional[str] = None, prefix: Optional[str] = None, timeout: Optional[float] = None, host: Optional[str] = None, path: Optional[str] = None, collection_name: Optional[str] = None, distance_func: str = 'Cosine', content_payload_key: str = 'page_content', metadata_payload_key: str = 'metadata', **kwargs: Any) → langchain.vectorstores.qdrant.Qdrant[source]# Construct Qdrant wrapper from a list of texts.
Parameters texts – A list of texts to be indexed in Qdrant. embedding – A subclass of Embeddings, responsible for text vectorization. metadatas – An optional list of metadata. If provided, it has to be of the same length as the list of texts. location – If ':memory:', use an in-memory Qdrant instance. If str, use it as a url parameter. If None, fall back to relying on the host and port parameters. url – Either host or str of “Optional[scheme], host, Optional[port], Optional[prefix]”. Default: None. port – Port of the REST API interface. Default: 6333. grpc_port – Port of the gRPC interface. Default: 6334. prefer_grpc – If true, use the gRPC interface whenever possible in custom methods. Default: False. https – If true, use the HTTPS (SSL) protocol. Default: None. api_key – API key for authentication in Qdrant Cloud. Default: None. prefix – If not None, add prefix to the REST URL path. Example: service/v1 will result in http://localhost:6333/service/v1/{qdrant-endpoint} for REST API. Default: None. timeout – Timeout for REST and gRPC API requests. Default: 5.0 seconds for REST and unlimited for gRPC. host – Host name of Qdrant service. If url and host are None, set to
‘localhost’. Default: None. path – Path in which the vectors will be stored while using local mode. Default: None. collection_name – Name of the Qdrant collection to be used. If not provided, it will be created randomly. Default: None. distance_func – Distance function. One of: “Cosine” / “Euclid” / “Dot”. Default: “Cosine”. content_payload_key – A payload key used to store the content of the document. Default: “page_content”. metadata_payload_key – A payload key used to store the metadata of the document. Default: “metadata”. **kwargs – Additional arguments passed directly into REST client initialization. This is a user-friendly interface that: 1. Creates embeddings, one for each text. 2. Initializes the Qdrant database as an in-memory docstore by default (overridable to a remote docstore). 3. Adds the text embeddings to the Qdrant database. This is intended to be a quick way to get started. Example from langchain import Qdrant from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() qdrant = Qdrant.from_texts(texts, embeddings, "localhost") max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]#
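A minimal sketch of the in-memory mode described under location (the collection name and text are hypothetical):

```python
from langchain.vectorstores import Qdrant
from langchain.embeddings.openai import OpenAIEmbeddings

# location=":memory:" keeps everything in process memory: nothing is
# persisted, which is convenient for tests.
qdrant = Qdrant.from_texts(
    ["qdrant is a vector database"],
    OpenAIEmbeddings(),
    location=":memory:",
    collection_name="demo_collection",
)
docs = qdrant.similarity_search("vector database", k=1)
```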
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. fetch_k – Number of Documents to fetch to pass to MMR algorithm. Defaults to 20. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance. similarity_search(query: str, k: int = 4, filter: Optional[Dict[str, Union[str, int, bool]]] = None, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. filter – Filter by metadata. Defaults to None. Returns List of Documents most similar to the query. similarity_search_with_score(query: str, k: int = 4, filter: Optional[Dict[str, Union[str, int, bool]]] = None) → List[Tuple[langchain.schema.Document, float]][source]# Return docs most similar to query. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4.
filter – Filter by metadata. Defaults to None. Returns List of Documents most similar to the query and score for each. class langchain.vectorstores.SupabaseVectorStore(client: supabase.client.Client, embedding: Embeddings, table_name: str, query_name: Union[str, None] = None)[source]# VectorStore for a Supabase postgres database. Assumes you have the pgvector extension installed and a match_documents (or similar) function. For more details: https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase You can implement your own match_documents function in order to limit the search space to a subset of documents, based on your own authorization or business logic. Note that the Supabase Python client does not yet support async operations. If you’d like to use max_marginal_relevance_search, please review the instructions below on modifying the match_documents function to return matched embeddings. add_texts(texts: Iterable[str], metadatas: Optional[List[dict[Any, Any]]] = None, **kwargs: Any) → List[str][source]# Run more texts through the embeddings and add to the vectorstore. Parameters texts – Iterable of strings to add to the vectorstore. metadatas – Optional list of metadatas associated with the texts. kwargs – vectorstore specific parameters. Returns List of ids from adding the texts into the vectorstore.
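A minimal sketch of wiring the store to a Supabase project (the project URL and key are hypothetical; the table and query_name must match the pgvector setup described above):

```python
from supabase import create_client
from langchain.vectorstores import SupabaseVectorStore
from langchain.embeddings.openai import OpenAIEmbeddings

supabase = create_client("https://xyz.supabase.co", "<service-role-key>")
vector_store = SupabaseVectorStore.from_texts(
    ["supabase is a postgres platform"],
    OpenAIEmbeddings(),
    client=supabase,
    table_name="documents",
    query_name="match_documents",
)
```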
add_vectors(vectors: List[List[float]], documents: List[langchain.schema.Document]) → List[str][source]# classmethod from_texts(texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, client: Optional[supabase.client.Client] = None, table_name: Optional[str] = 'documents', query_name: Union[str, None] = 'match_documents', **kwargs: Any) → SupabaseVectorStore[source]# Return VectorStore initialized from texts and embeddings. max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Parameters query – Text to look up documents similar to. k – Number of Documents to return. Defaults to 4. fetch_k – Number of Documents to fetch to pass to MMR algorithm. lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity.
Defaults to 0.5. Returns List of Documents selected by maximal marginal relevance. max_marginal_relevance_search requires that query_name returns matched embeddings alongside the matched documents. The following function demonstrates how to do this:

```sql
CREATE FUNCTION match_documents_embeddings(query_embedding vector(1536),
                                           match_count int)
RETURNS TABLE(
    id bigint,
    content text,
    metadata jsonb,
    embedding vector(1536),
    similarity float)
LANGUAGE plpgsql
AS $$
#variable_conflict use_column
BEGIN
    RETURN QUERY
    SELECT
        id,
        content,
        metadata,
        embedding,
        1 - (docstore.embedding <=> query_embedding) AS similarity
    FROM
        docstore
    ORDER BY
        docstore.embedding <=> query_embedding
    LIMIT match_count;
END;
$$;
```

max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]# Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Parameters embedding – Embedding to look up documents similar to. k – Number of Documents to return. Defaults to 4. fetch_k – Number of Documents to fetch to pass to MMR algorithm.
/content/https://python.langchain.com/en/latest/reference/modules/vectorstores.html
b9476fe061db-66
lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
Returns List of Documents selected by maximal marginal relevance.
query_name: str#
similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to query.
similarity_search_by_vector(embedding: List[float], k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to embedding vector.
Parameters
embedding – Embedding to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
Returns List of Documents most similar to the query vector.
similarity_search_by_vector_returning_embeddings(query: List[float], k: int) → List[Tuple[Document, float, np.ndarray[np.float32, Any]]][source]#
similarity_search_by_vector_with_relevance_scores(query: List[float], k: int) → List[Tuple[langchain.schema.Document, float]][source]#
similarity_search_with_relevance_scores(query: str, k: int = 4, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]#
Return docs and relevance scores in the range [0, 1]. 0 is dissimilar, 1 is most similar.
table_name: str#
class langchain.vectorstores.VectorStore[source]#
Interface for vector stores.
async aadd_documents(documents: List[langchain.schema.Document], **kwargs: Any) → List[str][source]#
Run more documents through the embeddings and add to the vectorstore.
Parameters
documents (List[Document]) – Documents to add to the vectorstore.
Returns List of IDs of the added texts.
Return type List[str]
async aadd_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]#
Run more texts through the embeddings and add to the vectorstore.
add_documents(documents: List[langchain.schema.Document], **kwargs: Any) → List[str][source]#
Run more documents through the embeddings and add to the vectorstore.
Parameters
documents (List[Document]) – Documents to add to the vectorstore.
Returns List of IDs of the added texts.
Return type List[str]
abstract add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]#
Run more texts through the embeddings and add to the vectorstore.
Parameters
texts – Iterable of strings to add to the vectorstore.
metadatas – Optional list of metadatas associated with the texts.
kwargs – vectorstore-specific parameters
Returns List of ids from adding the texts into the vectorstore.
async classmethod afrom_documents(documents: List[langchain.schema.Document], embedding: langchain.embeddings.base.Embeddings, **kwargs: Any) → langchain.vectorstores.base.VST[source]#
Return VectorStore initialized from documents and embeddings.
async classmethod afrom_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any) → langchain.vectorstores.base.VST[source]#
Return VectorStore initialized from texts and embeddings.
async amax_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs selected using the maximal marginal relevance.
async amax_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs selected using the maximal marginal relevance.
as_retriever(**kwargs: Any) → langchain.schema.BaseRetriever[source]#
async asimilarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to query.
async asimilarity_search_by_vector(embedding: List[float], k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to embedding vector.
classmethod from_documents(documents: List[langchain.schema.Document], embedding: langchain.embeddings.base.Embeddings, **kwargs: Any) → langchain.vectorstores.base.VST[source]#
Return VectorStore initialized from documents and embeddings.
abstract classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any) → langchain.vectorstores.base.VST[source]#
Return VectorStore initialized from texts and embeddings.
max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
fetch_k – Number of Documents to fetch to pass to MMR algorithm.
lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
Returns List of Documents selected by maximal marginal relevance.
max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents.
Parameters
embedding – Embedding to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
fetch_k – Number of Documents to fetch to pass to MMR algorithm.
lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
Returns List of Documents selected by maximal marginal relevance.
abstract similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to query.
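To make the abstract interface concrete, a hedged sketch using one subclass (FAISS is just an illustrative choice, not prescribed by this reference; the texts are placeholders):
```python
# Illustrative only: any concrete VectorStore exposes the same interface
# (from_texts, similarity_search, MMR search, as_retriever).
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

store = FAISS.from_texts(["alpha", "beta", "gamma"], OpenAIEmbeddings())

# Plain similarity search vs. MMR search, which trades relevance for diversity.
similar = store.similarity_search("alpha", k=2)
diverse = store.max_marginal_relevance_search("alpha", k=2, fetch_k=3)

# Wrap the store as a retriever for use in chains.
retriever = store.as_retriever(search_kwargs={"k": 2})
```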
similarity_search_by_vector(embedding: List[float], k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to embedding vector.
Parameters
embedding – Embedding to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
Returns List of Documents most similar to the query vector.
similarity_search_with_relevance_scores(query: str, k: int = 4, **kwargs: Any) → List[Tuple[langchain.schema.Document, float]][source]#
Return docs and relevance scores in the range [0, 1]. 0 is dissimilar, 1 is most similar.
class langchain.vectorstores.Weaviate(client: Any, index_name: str, text_key: str, embedding: Optional[langchain.embeddings.base.Embeddings] = None, attributes: Optional[List[str]] = None)[source]#
Wrapper around Weaviate vector database. To use, you should have the weaviate-client python package installed.
Example
import weaviate
from langchain.vectorstores import Weaviate
client = weaviate.Client(url=os.environ["WEAVIATE_URL"], ...)
weaviate = Weaviate(client, index_name, text_key)
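Continuing that example as a fuller sketch (the class name "Paragraph" and property "content" are hypothetical, and the schema is assumed to have a text2vec vectorizer module so text queries work without passing an embedding):
```python
# Sketch only: assumes a Weaviate class "Paragraph" with a "content" text
# property and a text2vec vectorizer module configured on the schema.
import os

import weaviate
from langchain.vectorstores import Weaviate

client = weaviate.Client(url=os.environ["WEAVIATE_URL"])
store = Weaviate(client, index_name="Paragraph", text_key="content")
store.add_texts(
    ["Weaviate stores objects and vectors."], metadatas=[{"source": "demo"}]
)
docs = store.similarity_search("vectors", k=2)
```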
add_texts(texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any) → List[str][source]#
Upload texts with metadata (properties) to Weaviate.
classmethod from_texts(texts: List[str], embedding: langchain.embeddings.base.Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any) → langchain.vectorstores.weaviate.Weaviate[source]#
Construct Weaviate wrapper from raw documents. This is a user-friendly interface that:
Embeds documents.
Creates a new index for the embeddings in the Weaviate instance.
Adds the documents to the newly created Weaviate index.
This is intended to be a quick way to get started.
Example
from langchain.vectorstores.weaviate import Weaviate
from langchain.embeddings import OpenAIEmbeddings

embeddings = OpenAIEmbeddings()
weaviate = Weaviate.from_texts(
    texts,
    embeddings,
    weaviate_url="http://localhost:8080"
)
max_marginal_relevance_search(query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
fetch_k – Number of Documents to fetch to pass to MMR algorithm.
lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
Returns List of Documents selected by maximal marginal relevance.
max_marginal_relevance_search_by_vector(embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents.
Parameters
embedding – Embedding to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
fetch_k – Number of Documents to fetch to pass to MMR algorithm.
lambda_mult – Number between 0 and 1 that determines the degree of diversity among the results, with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
Returns List of Documents selected by maximal marginal relevance.
similarity_search(query: str, k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Return docs most similar to query.
Parameters
query – Text to look up documents similar to.
k – Number of Documents to return. Defaults to 4.
Returns List of Documents most similar to the query.
similarity_search_by_vector(embedding: List[float], k: int = 4, **kwargs: Any) → List[langchain.schema.Document][source]#
Look up similar documents by embedding vector in Weaviate.
class langchain.vectorstores.Zilliz(embedding_function: Embeddings, collection_name: str = 'LangChainCollection', connection_args: Optional[dict[str, Any]] = None, consistency_level: str = 'Session', index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: Optional[bool] = False)[source]#
classmethod from_texts(texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = 'LangChainCollection', connection_args: dict[str, Any] = {}, consistency_level: str = 'Session', index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: bool = False, **kwargs: Any) → Zilliz[source]#
Create a Zilliz collection, index it with HNSW, and insert data.
Parameters
texts (List[str]) – Text data.
embedding (Embeddings) – Embedding function.
metadatas (Optional[List[dict]]) – Metadata for each text if it exists. Defaults to None.
collection_name (str, optional) – Collection name to use. Defaults to "LangChainCollection".
connection_args (dict[str, Any], optional) – Connection args to use. Defaults to DEFAULT_MILVUS_CONNECTION.
consistency_level (str, optional) – Which consistency level to use. Defaults to "Session".
index_params (Optional[dict], optional) – Which index_params to use. Defaults to None.
search_params (Optional[dict], optional) – Which search params to use. Defaults to None.
drop_old (Optional[bool], optional) – Whether to drop the collection with that name if it exists. Defaults to False.
Returns Zilliz Vector Store
Return type Zilliz
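A hypothetical construction sketch (the uri/token connection args and environment variables are assumptions for a Zilliz Cloud deployment, not taken from this reference):
```python
# Illustrative sketch; the connection_args keys assume a Zilliz Cloud endpoint.
import os

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Zilliz

vector_db = Zilliz.from_texts(
    texts=["Zilliz is a managed Milvus service."],
    embedding=OpenAIEmbeddings(),
    collection_name="LangChainCollection",
    connection_args={
        "uri": os.environ["ZILLIZ_CLOUD_URI"],
        "token": os.environ["ZILLIZ_CLOUD_TOKEN"],
    },
    drop_old=True,  # replace any existing collection with this name
)
docs = vector_db.similarity_search("managed Milvus", k=1)
```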
Document Transformers#
Transform documents.
pydantic model langchain.document_transformers.EmbeddingsRedundantFilter[source]#
Filter that drops redundant documents by comparing their embeddings.
field embeddings: langchain.embeddings.base.Embeddings [Required]#
Embeddings to use for embedding document contents.
field similarity_fn: Callable = <function cosine_similarity>#
Similarity function for comparing documents. The function is expected to take as input two matrices (List[List[float]]) and return a matrix of scores where higher values indicate greater similarity.
field similarity_threshold: float = 0.95#
Threshold for determining when two documents are similar enough to be considered redundant.
async atransform_documents(documents: Sequence[langchain.schema.Document], **kwargs: Any) → Sequence[langchain.schema.Document][source]#
Asynchronously transform a list of documents.
transform_documents(documents: Sequence[langchain.schema.Document], **kwargs: Any) → Sequence[langchain.schema.Document][source]#
Filter down documents.
langchain.document_transformers.get_stateful_documents(documents: Sequence[langchain.schema.Document]) → Sequence[langchain.document_transformers._DocumentWithState][source]#
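For instance, a minimal sketch of the redundancy filter (the document texts are placeholders):
```python
# Minimal sketch: near-duplicate documents (embedding cosine similarity above
# similarity_threshold) are dropped, keeping one representative of each group.
from langchain.document_transformers import EmbeddingsRedundantFilter
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import Document

docs = [
    Document(page_content="The cat sat on the mat."),
    Document(page_content="A cat was sitting on the mat."),  # near-duplicate
    Document(page_content="Quarterly revenue grew 12%."),
]
redundant_filter = EmbeddingsRedundantFilter(
    embeddings=OpenAIEmbeddings(), similarity_threshold=0.95
)
unique_docs = redundant_filter.transform_documents(docs)
```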
Python REPL#
Kept for backwards compatibility.
pydantic model langchain.python.PythonREPL[source]#
Simulates a standalone Python REPL.
field globals: Optional[Dict] [Optional] (alias '_globals')#
field locals: Optional[Dict] [Optional] (alias '_locals')#
run(command: str) → str[source]#
Run the command with the REPL's own globals/locals and return anything printed.
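A quick usage sketch (the captured output shown in the comment is what the print call would produce):
```python
# run() executes the command in the REPL's own namespace and returns the
# captured stdout as a string; state persists across run() calls.
from langchain.python import PythonREPL

repl = PythonREPL()
result = repl.run("x = 21; print(x * 2)")
# result is the captured stdout, e.g. "42\n"; x remains defined for later calls
```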