get_ipython().run_line_magic('pip', 'install --upgrade --quiet opaqueprompts langchain')
import os
os.environ["OPAQUEPROMPTS_API_KEY"] = "<OPAQUEPROMPTS_API_KEY>"
os.environ["OPENAI_API_KEY"] = "<OPENAI_API_KEY>"
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.globals import set_debug, set_verbose
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from langchain_community.llms import OpaquePrompts
from langchain_openai import OpenAI
set_debug(True)
set_verbose(True)
prompt_template = """
As an AI assistant, you will answer questions according to given context.
Sensitive personal information in the question is masked for privacy.
For instance, if the original text says "Giana is good," it will be changed
to "PERSON_998 is good."
Here's how to handle these changes:
* Consider these masked phrases just as placeholders, but still refer to
them in a relevant way when answering.
* It's possible that different masked terms might mean the same thing.
Stick with the given term and don't modify it.
* All masked terms follow the "TYPE_ID" pattern.
* Please don't invent new masked terms. For instance, if you see "PERSON_998,"
don't come up with "PERSON_997" or "PERSON_999" unless they're already in the question.
Conversation History: ```{history}```
Context : ```During our recent meeting on February 23, 2023, at 10:30 AM,
John Doe provided me with his personal details. His email is johndoe@example.com
and his contact number is 650-456-7890. He lives in New York City, USA, and
belongs to the American nationality with Christian beliefs and a leaning towards
the Democratic party. He mentioned that he recently made a transaction using his
credit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address
1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he noted
down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided his website
as https://johndoeportfolio.com. John also discussed some of his US-specific details.
He said his bank account number is 1234567890123456 and his drivers license is Y12345678.
His ITIN is 987-65-4321, and he recently renewed his passport, the number for which is
123456789. He emphasized not to share his SSN, which is 123-45-6789. Furthermore, he
mentioned that he accesses his work files remotely through the IP 192.168.1.1 and has
a medical license number MED-123456. ```
Question: ```{question}```
"""
chain = LLMChain(
prompt=PromptTemplate.from_template(prompt_template),
llm=OpaquePrompts(base_llm=OpenAI()),
memory=ConversationBufferWindowMemory(k=2),
verbose=True,
)
print(
chain.run(
{
"question": """Write a message to remind John to do password reset for his website to stay secure."""
},
callbacks=[StdOutCallbackHandler()],
)
)
import langchain_community.utilities.opaqueprompts as op
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
prompt = PromptTemplate.from_template(prompt_template)
llm = OpenAI()
pg_chain = (
op.sanitize
| RunnablePassthrough.assign(
        response=(lambda x: x["sanitized_input"]) | prompt | llm | StrOutputParser(),
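        # Hedged completion (assumption): close the assign() call and desanitize the model
        # response using the secure context produced by op.sanitize, mirroring the chain above.
    )
    | (lambda x: op.desanitize(x["response"], x["secure_context"]))
)
pg_chain.invoke(
    {
        "question": "Write a message to remind John to do password reset for his website to stay secure.",
        "history": "",
    }
)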
get_ipython().run_line_magic('pip', 'install --upgrade --quiet meilisearch')
import getpass
import os
os.environ["MEILI_HTTP_ADDR"] = getpass.getpass("Meilisearch HTTP address and port:")
os.environ["MEILI_MASTER_KEY"] = getpass.getpass("Meilisearch API Key:")
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.vectorstores import Meilisearch
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
embeddings = OpenAIEmbeddings()
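# Hedged usage sketch (the file path and query below are illustrative assumptions):
# split a local file and index the chunks in Meilisearch.
with open("../../modules/state_of_the_union.txt") as f:
    state_of_the_union = f.read()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_text(state_of_the_union)
vector_store = Meilisearch.from_texts(texts=texts, embedding=embeddings)
docs = vector_store.similarity_search(
    "What did the president say about Ketanji Brown Jackson", k=3
)
print(docs[0].page_content)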
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-nvidia-ai-endpoints')
import getpass
import os
if not os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"):
nvapi_key = getpass.getpass("Enter your NVIDIA API key: ")
assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key"
os.environ["NVIDIA_API_KEY"] = nvapi_key
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm = ChatNVIDIA(model="mixtral_8x7b")
result = llm.invoke("Write a ballad about LangChain.")
print(result.content)
print(llm.batch(["What's 2*3?", "What's 2*6?"]))
for chunk in llm.stream("How far can a seagull fly in one day?"):
print(chunk.content, end="|")
async for chunk in llm.astream(
"How long does it take for monarch butterflies to migrate?"
):
print(chunk.content, end="|")
ChatNVIDIA.get_available_models()
get_ipython().run_line_magic('pip', 'install --upgrade --quiet marqo')
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Marqo
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
import marqo
marqo_url = "http://localhost:8882" # if using marqo cloud replace with your endpoint (console.marqo.ai)
marqo_api_key = "" # if using marqo cloud replace with your api key (console.marqo.ai)
client = marqo.Client(url=marqo_url, api_key=marqo_api_key)
index_name = "langchain-demo"
docsearch = Marqo.from_documents(docs, index_name=index_name)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sodapy')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pandas')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet geopandas')
import ast
import geopandas as gpd
import pandas as pd
from langchain_community.document_loaders import OpenCityDataLoader
dataset = "tmnf-yvry" # San Francisco crime data
loader = OpenCityDataLoader(city_id="data.sfgov.org", dataset_id=dataset, limit=5000)
docs = loader.load()
df = pd.DataFrame([ast.literal_eval(d.page_content) for d in docs])
df["Latitude"] = df["location"].apply(lambda loc: loc["coordinates"][1])
df["Longitude"] = df["location"].apply(lambda loc: loc["coordinates"][0])
gdf = gpd.GeoDataFrame(
df, geometry=gpd.points_from_xy(df.Longitude, df.Latitude), crs="EPSG:4326"
)
gdf = gdf[
(gdf["Longitude"] >= -123.173825)
& (gdf["Longitude"] <= -122.281780)
& (gdf["Latitude"] >= 37.623983)
& (gdf["Latitude"] <= 37.929824)
]
import matplotlib.pyplot as plt
sf = gpd.read_file("https://data.sfgov.org/resource/3psu-pn9h.geojson")
fig, ax = plt.subplots(figsize=(10, 10))
sf.plot(ax=ax, color="white", edgecolor="black")
gdf.plot(ax=ax, color="red", markersize=5)
plt.show()
from langchain_community.document_loaders import GeoDataFrameLoader
loader = GeoDataFrameLoader(data_frame=gdf, page_content_column="geometry")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet python-steam-api python-decouple')
import os
os.environ["STEAM_KEY"] = "xyz"
os.environ["STEAM_ID"] = "123"
os.environ["OPENAI_API_KEY"] = "abc"
from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.steam.toolkit import SteamToolkit
from langchain_community.utilities.steam import SteamWebAPIWrapper
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
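# Hedged sketch (assumption): wire the Steam API wrapper into the toolkit and an agent;
# the sample query is illustrative.
steam = SteamWebAPIWrapper()
toolkit = SteamToolkit.from_steam_api_wrapper(steam)
agent = initialize_agent(
    toolkit.get_tools(),
    llm,
    agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)
agent.run("Can you give information about the game Terraria?")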
from langchain_community.chat_models import ChatDatabricks
from langchain_core.messages import HumanMessage
from mlflow.deployments import get_deploy_client
client = get_deploy_client("databricks")
secret = "secrets/<scope>/openai-api-key" # replace `<scope>` with your scope
name = "my-chat" # rename this if my-chat already exists
client.create_endpoint(
name=name,
config={
"served_entities": [
{
"name": "my-chat",
"external_model": {
"name": "gpt-4",
"provider": "openai",
"task": "llm/v1/chat",
"openai_config": {
"openai_api_key": "{{" + secret + "}}",
},
},
}
],
},
)
chat = ChatDatabricks(
target_uri="databricks",
endpoint=name,
temperature=0.1,
)
chat([HumanMessage(content="hello")])
from langchain_community.embeddings import DatabricksEmbeddings
embeddings = DatabricksEmbeddings(endpoint="databricks-bge-large-en")
embeddings.embed_query("hello")[:3]
from langchain_community.llms import Databricks
llm = Databricks(endpoint_name="dolly")
llm("How are you?")
llm("How are you?", stop=["."])
import os
import dbutils
os.environ["DATABRICKS_TOKEN"] = dbutils.secrets.get("myworkspace", "api_token")
llm = Databricks(host="myworkspace.cloud.databricks.com", endpoint_name="dolly")
llm("How are you?")
llm = Databricks(endpoint_name="dolly", model_kwargs={"temperature": 0.1})
llm("How are you?")
def transform_input(**request):
full_prompt = f"""{request["prompt"]}
Be Concise.
"""
request["prompt"] = full_prompt
return request
llm = Databricks(endpoint_name="dolly", transform_input_fn=transform_input)
llm("How are you?")
llm = Databricks(cluster_driver_port="7777")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet elasticsearch==7.11.0')
import getpass
import os
os.environ["QIANFAN_AK"] = getpass.getpass("Your Qianfan AK:")
os.environ["QIANFAN_SK"] = getpass.getpass("Your Qianfan SK:")
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../../state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai duckduckgo-search')
from langchain.tools import DuckDuckGoSearchRun
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
search = DuckDuckGoSearchRun()
template = """turn the following user input into a search query for a search engine:
{input}"""
prompt = ChatPromptTemplate.from_template(template)
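# Hedged sketch (assumption): rewrite the raw input into a search query, then pass it to the search tool.
chain = prompt | ChatOpenAI(temperature=0) | StrOutputParser() | search
chain.invoke({"input": "I'd like to figure out what games are tonight"})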
get_ipython().run_line_magic('pip', 'install --upgrade --quiet airbyte-source-shopify')
from langchain_community.document_loaders.airbyte import AirbyteShopifyLoader
config = {
}
loader = AirbyteShopifyLoader(
config=config, stream_name="orders"
) # check the documentation linked above for a list of all streams
docs = loader.load()
docs_iterator = loader.lazy_load()
from langchain.docstore.document import Document
def handle_record(record, id):
    return Document(page_content=record.data["title"], metadata=record.data)
import re
from typing import Union
from langchain.agents import (
AgentExecutor,
AgentOutputParser,
LLMSingleActionAgent,
)
from langchain.chains import LLMChain
from langchain.prompts import StringPromptTemplate
from langchain_community.agent_toolkits import NLAToolkit
from langchain_community.tools.plugin import AIPlugin
from langchain_core.agents import AgentAction, AgentFinish
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
urls = [
"https://datasette.io/.well-known/ai-plugin.json",
"https://api.speak.com/.well-known/ai-plugin.json",
"https://www.wolframalpha.com/.well-known/ai-plugin.json",
"https://www.zapier.com/.well-known/ai-plugin.json",
"https://www.klarna.com/.well-known/ai-plugin.json",
"https://www.joinmilo.com/.well-known/ai-plugin.json",
"https://slack.com/.well-known/ai-plugin.json",
"https://schooldigger.com/.well-known/ai-plugin.json",
]
AI_PLUGINS = [AIPlugin.from_url(url) for url in urls]
get_ipython().run_line_magic('pip', 'install -qU langchain langchain-openai langchain-anthropic langchain-community wikipedia')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass()
from langchain_community.retrievers import WikipediaRetriever
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
wiki = WikipediaRetriever(top_k_results=6, doc_content_chars_max=2000)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a helpful AI assistant. Given a user question and some Wikipedia article snippets, answer the user question. If none of the articles answer the question, just say you don't know.\n\nHere are the Wikipedia articles:{context}",
),
("human", "{question}"),
]
)
prompt.pretty_print()
from operator import itemgetter
from typing import List
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import (
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
def format_docs(docs: List[Document]) -> str:
"""Convert Documents to a single string.:"""
formatted = [
f"Article Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for doc in docs
]
return "\n\n" + "\n\n".join(formatted)
format = itemgetter("docs") | RunnableLambda(format_docs)
answer = prompt | llm | StrOutputParser()
chain = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format)
.assign(answer=answer)
.pick(["answer", "docs"])
)
chain.invoke("How fast are cheetahs?")
from langchain_core.pydantic_v1 import BaseModel, Field
class cited_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[int] = Field(
...,
description="The integer IDs of the SPECIFIC sources which justify the answer.",
)
llm_with_tool = llm.bind_tools(
[cited_answer],
tool_choice="cited_answer",
)
example_q = """What Brian's height?
Source: 1
Information: Suzy is 6'2"
Source: 2
Information: Jeremiah is blonde
Source: 3
Information: Brian is 3 inches shorter than Suzy"""
llm_with_tool.invoke(example_q)
from langchain.output_parsers.openai_tools import JsonOutputKeyToolsParser
output_parser = JsonOutputKeyToolsParser(key_name="cited_answer", return_single=True)
(llm_with_tool | output_parser).invoke(example_q)
def format_docs_with_id(docs: List[Document]) -> str:
formatted = [
f"Source ID: {i}\nArticle Title: {doc.metadata['title']}\nArticle Snippet: {doc.page_content}"
for i, doc in enumerate(docs)
]
return "\n\n" + "\n\n".join(formatted)
format_1 = itemgetter("docs") | RunnableLambda(format_docs_with_id)
answer_1 = prompt | llm_with_tool | output_parser
chain_1 = (
RunnableParallel(question=RunnablePassthrough(), docs=wiki)
.assign(context=format_1)
.assign(cited_answer=answer_1)
.pick(["cited_answer", "docs"])
)
chain_1.invoke("How fast are cheetahs?")
class Citation(BaseModel):
source_id: int = Field(
...,
description="The integer ID of a SPECIFIC source which justifies the answer.",
)
quote: str = Field(
...,
description="The VERBATIM quote from the specified source that justifies the answer.",
)
class quoted_answer(BaseModel):
"""Answer the user question based only on the given sources, and cite the sources used."""
answer: str = Field(
...,
description="The answer to the user question, which is based only on the given sources.",
)
citations: List[Citation] = Field(
..., description="Citations from the given sources that justify the answer."
)
output_parser_2 = JsonOutputKeyToolsParser(key_name="quoted_answer", return_single=True)
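# Hedged sketch (assumption): bind the quoted_answer tool and build a chain analogous to chain_1 above.
llm_with_tool_2 = llm.bind_tools([quoted_answer], tool_choice="quoted_answer")
answer_2 = prompt | llm_with_tool_2 | output_parser_2
chain_2 = (
    RunnableParallel(question=RunnablePassthrough(), docs=wiki)
    .assign(context=format_1)
    .assign(quoted_answer=answer_2)
    .pick(["quoted_answer", "docs"])
)
chain_2.invoke("How fast are cheetahs?")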
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-pinecone langchain-openai langchain')
from langchain_community.document_loaders import TextLoader
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
from langchain_pinecone import PineconeVectorStore
index_name = "langchain-test-index"
docsearch = PineconeVectorStore.from_documents(docs, embeddings, index_name=index_name)
query = "What did the president say about Ketanji Brown Jackson"
docs = docsearch.similarity_search(query)
print(docs[0].page_content)
vectorstore = PineconeVectorStore(index_name=index_name, embedding=embeddings)
STAGE_BUCKET = "<bucket-name>"
get_ipython().run_cell_magic('bash', ' -s "$STAGE_BUCKET"', '\nrm -rf data\nmkdir -p data\ncd data\necho getting org ontology and sample org instances\nwget http://www.w3.org/ns/org.ttl \nwget https://raw.githubusercontent.com/aws-samples/amazon-neptune-ontology-example-blog/main/data/example_org.ttl \n\necho Copying org ttl to S3\naws s3 cp org.ttl s3://$1/org.ttl\naws s3 cp example_org.ttl s3://$1/example_org.ttl\n')
get_ipython().run_line_magic('load', '-s s3://{STAGE_BUCKET} -f turtle --store-to loadres --run')
get_ipython().run_line_magic('load_status', "{loadres['payload']['loadId']} --errors --details")
EXAMPLES = """
<question>
Find organizations.
</question>
<sparql>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX org: <http://www.w3.org/ns/org#>
select ?org ?orgName where {{
?org rdfs:label ?orgName .
}}
</sparql>
<question>
Find sites of an organization
</question>
<sparql>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX org: <http://www.w3.org/ns/org#>
select ?org ?orgName ?siteName where {{
?org rdfs:label ?orgName .
?org org:hasSite/rdfs:label ?siteName .
}}
</sparql>
<question>
Find suborganizations of an organization
</question>
<sparql>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX org: <http://www.w3.org/ns/org#>
select ?org ?orgName ?subName where {{
?org rdfs:label ?orgName .
?org org:hasSubOrganization/rdfs:label ?subName .
}}
</sparql>
<question>
Find organizational units of an organization
</question>
<sparql>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX org: <http://www.w3.org/ns/org#>
select ?org ?orgName ?unitName where {{
?org rdfs:label ?orgName .
?org org:hasUnit/rdfs:label ?unitName .
}}
</sparql>
<question>
Find members of an organization. Also find their manager, or the member they report to.
</question>
<sparql>
PREFIX org: <http://www.w3.org/ns/org#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
select * where {{
?person rdf:type foaf:Person .
?person org:memberOf ?org .
OPTIONAL {{ ?person foaf:firstName ?firstName . }}
OPTIONAL {{ ?person foaf:family_name ?lastName . }}
OPTIONAL {{ ?person org:reportsTo ?manager }} .
}}
</sparql>
<question>
Find change events, such as mergers and acquisitions, of an organization
</question>
<sparql>
PREFIX org: <http://www.w3.org/ns/org#>
select ?event ?prop ?obj where {{
?org rdfs:label ?orgName .
?event rdf:type org:ChangeEvent .
?event org:originalOrganization ?origOrg .
?event org:resultingOrganization ?resultingOrg .
}}
</sparql>
"""
import boto3
from langchain.chains.graph_qa.neptune_sparql import NeptuneSparqlQAChain
from langchain_community.chat_models import BedrockChat
from langchain_community.graphs import NeptuneRdfGraph
host = "<neptune-host>"
port = "<neptune-port>"
region = "us-east-1" # specify region
graph = NeptuneRdfGraph(
host=host, port=port, use_iam_auth=True, region_name=region, hide_comments=True
)
schema_elements = graph.get_schema_elements
graph.load_from_schema_elements(schema_elements)
bedrock_client = boto3.client("bedrock-runtime")
llm = BedrockChat(model_id="anthropic.claude-v2", client=bedrock_client)
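# Hedged sketch (assumption): build the QA chain over the Neptune RDF graph using the examples above;
# the sample question is illustrative.
chain = NeptuneSparqlQAChain.from_llm(
    llm=llm,
    graph=graph,
    examples=EXAMPLES,
    verbose=True,
)
chain.invoke("How many organizations are in the graph?")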
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-community langchainhub langchain-openai faiss-cpu')
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(texts, embeddings)
retriever = db.as_retriever()
from langchain.tools.retriever import create_retriever_tool
tool = create_retriever_tool(
retriever,
"search_state_of_union",
"Searches and returns excerpts from the 2022 State of the Union.",
)
tools = [tool]
from langchain import hub
prompt = hub.pull("hwchase17/openai-tools-agent")
prompt.messages
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0)
from langchain.agents import AgentExecutor, create_openai_tools_agent
agent = create_openai_tools_agent(llm, tools, prompt)
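# Hedged sketch: wrap the agent in an executor and query the retriever tool defined above.
agent_executor = AgentExecutor(agent=agent, tools=tools)
result = agent_executor.invoke(
    {"input": "What did the president say about Ketanji Brown Jackson?"}
)
print(result["output"])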
get_ipython().system(' pip install langchain unstructured[all-docs] pydantic lxml')
path = "/Users/rlm/Desktop/Papers/LLaVA/"
from typing import Any
from pydantic import BaseModel
from unstructured.partition.pdf import partition_pdf
raw_pdf_elements = partition_pdf(
filename=path + "LLaVA.pdf",
extract_images_in_pdf=True,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
category_counts = {}
for element in raw_pdf_elements:
category = str(type(element))
if category in category_counts:
category_counts[category] += 1
else:
category_counts[category] = 1
unique_categories = set(category_counts.keys())
category_counts
class Element(BaseModel):
type: str
text: Any
categorized_elements = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
categorized_elements.append(Element(type="table", text=str(element)))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
categorized_elements.append(Element(type="text", text=str(element)))
table_elements = [e for e in categorized_elements if e.type == "table"]
print(len(table_elements))
text_elements = [e for e in categorized_elements if e.type == "text"]
print(len(text_elements))
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
prompt_text = """You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
texts = [i.text for i in text_elements]
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
tables = [i.text for i in table_elements]
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
get_ipython().run_cell_magic('bash', '', '\n# Define the directory containing the images\nIMG_DIR=~/Desktop/Papers/LLaVA/\n\n# Loop through each image in the directory\nfor img in "${IMG_DIR}"*.jpg; do\n # Extract the base name of the image without extension\n base_name=$(basename "$img" .jpg)\n\n # Define the output file name based on the image name\n output_file="${IMG_DIR}${base_name}.txt"\n\n # Execute the command and save the output to the defined output file\n /Users/rlm/Desktop/Code/llama.cpp/bin/llava -m ../models/llava-7b/ggml-model-q5_k.gguf --mmproj ../models/llava-7b/mmproj-model-f16.gguf --temp 0.1 -p "Describe the image in detail. Be specific about graphs, such as bar plots." --image "$img" > "$output_file"\n\ndone\n')
import glob
import os
file_paths = glob.glob(os.path.expanduser(os.path.join(path, "*.txt")))
img_summaries = []
for file_path in file_paths:
with open(file_path, "r") as file:
img_summaries.append(file.read())
logging_header = "clip_model_load: total allocated memory: 201.27 MB\n\n"
cleaned_img_summary = [s.split(logging_header, 1)[1].strip() for s in img_summaries]
import uuid
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
vectorstore = Chroma(collection_name="summaries", embedding_function=OpenAIEmbeddings())
store = InMemoryStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
doc_ids = [str(uuid.uuid4()) for _ in texts]
summary_texts = [
Document(page_content=s, metadata={id_key: doc_ids[i]})
for i, s in enumerate(text_summaries)
]
retriever.vectorstore.add_documents(summary_texts)
retriever.docstore.mset(list(zip(doc_ids, texts)))
table_ids = [str(uuid.uuid4()) for _ in tables]
summary_tables = [
Document(page_content=s, metadata={id_key: table_ids[i]})
for i, s in enumerate(table_summaries)
]
retriever.vectorstore.add_documents(summary_tables)
retriever.docstore.mset(list(zip(table_ids, tables)))
img_ids = [str(uuid.uuid4()) for _ in cleaned_img_summary]
summary_img = [
Document(page_content=s, metadata={id_key: img_ids[i]})
for i, s in enumerate(cleaned_img_summary)
]
retriever.vectorstore.add_documents(summary_img)
retriever.docstore.mset(list(zip(img_ids, cleaned_img_summary)))
from IPython.display import SVG
from langchain_experimental.cpal.base import CPALChain
from langchain_experimental.pal_chain import PALChain
from langchain_openai import OpenAI
llm = OpenAI(temperature=0, max_tokens=512)
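# Hedged sketch (assumption): the factory constructors and sample question below are assumed
# from the imports above, not taken from the original snippet.
pal_chain = PALChain.from_math_prompt(llm=llm, verbose=True)
cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True)
question = (
    "Jan has three times the number of pets as Marcia. "
    "Marcia has two more pets than Cindy. "
    "If Cindy has four pets, how many total pets do the three have?"
)
pal_chain.run(question)
cpal_chain.run(question)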
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.tools import AIPluginTool
from langchain_openai import ChatOpenAI
tool = AIPluginTool.from_plugin_url("https://www.klarna.com/.well-known/ai-plugin.json")
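# Hedged sketch (assumption): combine the plugin tool with the requests tools and run an agent;
# the sample query is illustrative.
llm = ChatOpenAI(temperature=0)
tools = load_tools(["requests_all"])
tools += [tool]
agent_chain = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent_chain.run("What t-shirts are available on Klarna?")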
import functools
import random
from collections import OrderedDict
from typing import Callable, List
import tenacity
from langchain.output_parsers import RegexParser
from langchain.prompts import (
PromptTemplate,
)
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
class DialogueAgent:
def __init__(
self,
name: str,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.name = name
self.system_message = system_message
self.model = model
self.prefix = f"{self.name}: "
self.reset()
def reset(self):
self.message_history = ["Here is the conversation so far."]
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
message = self.model(
[
self.system_message,
HumanMessage(content="\n".join(self.message_history + [self.prefix])),
]
)
return message.content
def receive(self, name: str, message: str) -> None:
"""
Concatenates {message} spoken by {name} into message history
"""
self.message_history.append(f"{name}: {message}")
class DialogueSimulator:
def __init__(
self,
agents: List[DialogueAgent],
selection_function: Callable[[int, List[DialogueAgent]], int],
) -> None:
self.agents = agents
self._step = 0
self.select_next_speaker = selection_function
def reset(self):
for agent in self.agents:
agent.reset()
def inject(self, name: str, message: str):
"""
Initiates the conversation with a {message} from {name}
"""
for agent in self.agents:
agent.receive(name, message)
self._step += 1
def step(self) -> tuple[str, str]:
speaker_idx = self.select_next_speaker(self._step, self.agents)
speaker = self.agents[speaker_idx]
message = speaker.send()
for receiver in self.agents:
receiver.receive(speaker.name, message)
self._step += 1
return speaker.name, message
class IntegerOutputParser(RegexParser):
def get_format_instructions(self) -> str:
return "Your response should be an integer delimited by angled brackets, like this: <int>."
class DirectorDialogueAgent(DialogueAgent):
def __init__(
self,
name,
system_message: SystemMessage,
model: ChatOpenAI,
speakers: List[DialogueAgent],
stopping_probability: float,
) -> None:
super().__init__(name, system_message, model)
self.speakers = speakers
self.next_speaker = ""
self.stop = False
self.stopping_probability = stopping_probability
self.termination_clause = "Finish the conversation by stating a concluding message and thanking everyone."
self.continuation_clause = "Do not end the conversation. Keep the conversation going by adding your own ideas."
self.response_prompt_template = PromptTemplate(
input_variables=["message_history", "termination_clause"],
template=f"""{{message_history}}
Follow up with an insightful comment.
{{termination_clause}}
{self.prefix}
""",
)
self.choice_parser = IntegerOutputParser(
regex=r"<(\d+)>", output_keys=["choice"], default_output_key="choice"
)
self.choose_next_speaker_prompt_template = PromptTemplate(
input_variables=["message_history", "speaker_names"],
template=f"""{{message_history}}
Given the above conversation, select the next speaker by choosing index next to their name:
{{speaker_names}}
{self.choice_parser.get_format_instructions()}
Do nothing else.
""",
)
self.prompt_next_speaker_prompt_template = PromptTemplate(
input_variables=["message_history", "next_speaker"],
template=f"""{{message_history}}
The next speaker is {{next_speaker}}.
Prompt the next speaker to speak with an insightful question.
{self.prefix}
""",
)
def _generate_response(self):
sample = random.uniform(0, 1)
self.stop = sample < self.stopping_probability
print(f"\tStop? {self.stop}\n")
response_prompt = self.response_prompt_template.format(
message_history="\n".join(self.message_history),
termination_clause=self.termination_clause if self.stop else "",
)
self.response = self.model(
[
self.system_message,
HumanMessage(content=response_prompt),
]
).content
return self.response
@tenacity.retry(
stop=tenacity.stop_after_attempt(2),
wait=tenacity.wait_none(), # No waiting time between retries
retry=tenacity.retry_if_exception_type(ValueError),
before_sleep=lambda retry_state: print(
f"ValueError occurred: {retry_state.outcome.exception()}, retrying..."
),
retry_error_callback=lambda retry_state: 0,
) # Default value when all retries are exhausted
def _choose_next_speaker(self) -> str:
speaker_names = "\n".join(
[f"{idx}: {name}" for idx, name in enumerate(self.speakers)]
)
choice_prompt = self.choose_next_speaker_prompt_template.format(
message_history="\n".join(
self.message_history + [self.prefix] + [self.response]
),
speaker_names=speaker_names,
)
choice_string = self.model(
[
self.system_message,
HumanMessage(content=choice_prompt),
]
).content
choice = int(self.choice_parser.parse(choice_string)["choice"])
return choice
def select_next_speaker(self):
return self.chosen_speaker_id
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
self.response = self._generate_response()
if self.stop:
message = self.response
else:
self.chosen_speaker_id = self._choose_next_speaker()
self.next_speaker = self.speakers[self.chosen_speaker_id]
print(f"\tNext speaker: {self.next_speaker}\n")
next_prompt = self.prompt_next_speaker_prompt_template.format(
message_history="\n".join(
self.message_history + [self.prefix] + [self.response]
),
next_speaker=self.next_speaker,
)
message = self.model(
[
self.system_message,
HumanMessage(content=next_prompt),
]
).content
message = " ".join([self.response, message])
return message
topic = "The New Workout Trend: Competitive Sitting - How Laziness Became the Next Fitness Craze"
director_name = "Jon Stewart"
agent_summaries = OrderedDict(
{
"Jon Stewart": ("Host of the Daily Show", "New York"),
"Samantha Bee": ("Hollywood Correspondent", "Los Angeles"),
"Aasif Mandvi": ("CIA Correspondent", "Washington D.C."),
"Ronny Chieng": ("Average American Correspondent", "Cleveland, Ohio"),
}
)
word_limit = 50
agent_summary_string = "\n- ".join(
[""]
+ [
f"{name}: {role}, located in {location}"
for name, (role, location) in agent_summaries.items()
]
)
conversation_description = f"""This is a Daily Show episode discussing the following topic: {topic}.
The episode features {agent_summary_string}."""
agent_descriptor_system_message = SystemMessage(
content="You can add detail to the description of each person."
)
def generate_agent_description(agent_name, agent_role, agent_location):
agent_specifier_prompt = [
agent_descriptor_system_message,
HumanMessage(
content=f"""{conversation_description}
Please reply with a creative description of {agent_name}, who is a {agent_role} in {agent_location}, that emphasizes their particular role and location.
Speak directly to {agent_name} in {word_limit} words or less.
Do not add anything else."""
),
]
agent_description = ChatOpenAI(temperature=1.0)(agent_specifier_prompt).content
return agent_description
def generate_agent_header(agent_name, agent_role, agent_location, agent_description):
return f"""{conversation_description}
Your name is {agent_name}, your role is {agent_role}, and you are located in {agent_location}.
Your description is as follows: {agent_description}
You are discussing the topic: {topic}.
Your goal is to provide the most informative, creative, and novel perspectives of the topic from the perspective of your role and your location.
"""
def generate_agent_system_message(agent_name, agent_header):
return SystemMessage(
content=(
f"""{agent_header}
You will speak in the style of {agent_name}, and exaggerate your personality.
Do not say the same things over and over again.
Speak in the first person from the perspective of {agent_name}
For describing your own body movements, wrap your description in '*'.
Do not change roles!
Do not speak from the perspective of anyone else.
Speak only from the perspective of {agent_name}.
Stop speaking the moment you finish speaking from your perspective.
Never forget to keep your response to {word_limit} words!
Do not add anything else.
"""
)
)
agent_descriptions = [
generate_agent_description(name, role, location)
for name, (role, location) in agent_summaries.items()
]
agent_headers = [
generate_agent_header(name, role, location, description)
for (name, (role, location)), description in zip(
agent_summaries.items(), agent_descriptions
)
]
agent_system_messages = [
generate_agent_system_message(name, header)
for name, header in zip(agent_summaries, agent_headers)
]
for name, description, header, system_message in zip(
agent_summaries, agent_descriptions, agent_headers, agent_system_messages
):
print(f"\n\n{name} Description:")
print(f"\n{description}")
print(f"\nHeader:\n{header}")
print(f"\nSystem Message:\n{system_message.content}")
topic_specifier_prompt = [
SystemMessage(content="You can make a task more specific."),
HumanMessage(
content=f"""{conversation_description}
Please elaborate on the topic.
Frame the topic as a single question to be answered.
Be creative and imaginative.
Please reply with the specified topic in {word_limit} words or less.
Do not add anything else."""
),
]
specified_topic = ChatOpenAI(temperature=1.0)(topic_specifier_prompt).content
print(f"Original topic:\n{topic}\n")
print(f"Detailed topic:\n{specified_topic}\n")
def select_next_speaker(
step: int, agents: List[DialogueAgent], director: DirectorDialogueAgent
) -> int:
"""
    If the step is odd, the director speaks.
    Otherwise, the director selects the next speaker.
"""
if step % 2 == 1:
idx = 0
else:
idx = director.select_next_speaker() + 1 # +1 because we excluded the director
return idx
director = DirectorDialogueAgent(
name=director_name,
system_message=agent_system_messages[0],
    model=ChatOpenAI(temperature=0.2),
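    # Hedged completion (assumption): the remaining constructor arguments, based on the
    # DirectorDialogueAgent signature defined above; the stopping probability is illustrative.
    speakers=[name for name in agent_summaries if name != director_name],
    stopping_probability=0.2,
)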
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-text-splitters tiktoken')
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
from langchain_text_splitters import CharacterTextSplitter
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
chunk_size=100, chunk_overlap=0
)
texts = text_splitter.split_text(state_of_the_union)
print(texts[0])
from langchain_text_splitters import TokenTextSplitter
text_splitter = TokenTextSplitter(chunk_size=10, chunk_overlap=0)
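# Hedged usage sketch: split the same text with the token-based splitter and inspect the first chunk.
texts = text_splitter.split_text(state_of_the_union)
print(texts[0])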
get_ipython().run_line_magic('pip', 'install --upgrade --quiet "docarray[hnswlib]"')
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import DocArrayHnswSearch
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
documents = TextLoader("../../modules/state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
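# Hedged sketch (assumption): work_dir and n_dim below are illustrative values for the HNSW store.
db = DocArrayHnswSearch.from_documents(
    docs, embeddings, work_dir="hnswlib_store/", n_dim=1536
)
query = "What did the president say about Ketanji Brown Jackson"
found_docs = db.similarity_search(query)
print(found_docs[0].page_content)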
import getpass
import os
os.environ["POLYGON_API_KEY"] = getpass.getpass()
from langchain_community.tools.polygon.financials import PolygonFinancials
from langchain_community.tools.polygon.last_quote import PolygonLastQuote
from langchain_community.tools.polygon.ticker_news import PolygonTickerNews
from langchain_community.utilities.polygon import PolygonAPIWrapper
api_wrapper = PolygonAPIWrapper()
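# Hedged sketch (assumption): each tool takes the shared wrapper; the ticker symbol is illustrative.
ticker = "AAPL"
last_quote_tool = PolygonLastQuote(api_wrapper=api_wrapper)
news_tool = PolygonTickerNews(api_wrapper=api_wrapper)
financials_tool = PolygonFinancials(api_wrapper=api_wrapper)
print(last_quote_tool.run(ticker))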
get_ipython().system('pip install --quiet langchain_experimental langchain_openai')
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
from langchain_experimental.text_splitter import SemanticChunker
from langchain_openai.embeddings import OpenAIEmbeddings
text_splitter = SemanticChunker(OpenAIEmbeddings())
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-community langchainhub langchain-openai faiss-cpu')
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain label-studio label-studio-sdk langchain-openai')
import os
os.environ["LABEL_STUDIO_URL"] = "<YOUR-LABEL-STUDIO-URL>" # e.g. http://localhost:8080
os.environ["LABEL_STUDIO_API_KEY"] = "<YOUR-LABEL-STUDIO-API-KEY>"
os.environ["OPENAI_API_KEY"] = "<YOUR-OPENAI-API-KEY>"
from langchain.callbacks import LabelStudioCallbackHandler
from langchain_openai import OpenAI
llm = OpenAI(
temperature=0, callbacks=[LabelStudioCallbackHandler(project_name="My Project")]
)
print(llm("Tell me a joke"))
from langchain.callbacks import LabelStudioCallbackHandler
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
chat_llm = ChatOpenAI(
callbacks=[
LabelStudioCallbackHandler(
mode="chat",
project_name="New Project with Chat",
)
]
)
llm_results = chat_llm(
[
SystemMessage(content="Always use a lot of emojis"),
        HumanMessage(content="Tell me a joke"),
    ]
)
from langchain_community.chat_models import ChatDatabricks
from langchain_core.messages import HumanMessage
from mlflow.deployments import get_deploy_client
client = get_deploy_client("databricks")
secret = "secrets/<scope>/openai-api-key" # replace `<scope>` with your scope
name = "my-chat" # rename this if my-chat already exists
client.create_endpoint(
name=name,
config={
"served_entities": [
{
"name": "my-chat",
"external_model": {
"name": "gpt-4",
"provider": "openai",
"task": "llm/v1/chat",
"openai_config": {
"openai_api_key": "{{" + secret + "}}",
},
},
}
],
},
)
chat = ChatDatabricks(
target_uri="databricks",
endpoint=name,
temperature=0.1,
)
chat([HumanMessage(content="hello")])
get_ipython().run_line_magic('pip', 'install --upgrade --quiet marqo')
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Marqo
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
import marqo
marqo_url = "http://localhost:8882" # if using marqo cloud replace with your endpoint (console.marqo.ai)
marqo_api_key = "" # if using marqo cloud replace with your api key (console.marqo.ai)
client = marqo.Client(url=marqo_url, api_key=marqo_api_key)
index_name = "langchain-demo"
docsearch = Marqo.from_documents(docs, index_name=index_name)
query = "What did the president say about Ketanji Brown Jackson"
result_docs = docsearch.similarity_search(query)
print(result_docs[0].page_content)
result_docs = docsearch.similarity_search_with_score(query)
print(result_docs[0][0].page_content, result_docs[0][1], sep="\n")
index_name = "langchain-multimodal-demo"
try:
client.delete_index(index_name)
except Exception:
print(f"Creating {index_name}")
settings = {"treat_urls_and_pointers_as_images": True, "model": "ViT-L/14"}
client.create_index(index_name, **settings)
client.index(index_name).add_documents(
[
{
"caption": "Bus",
"image": "https://raw.githubusercontent.com/marqo-ai/marqo/mainline/examples/ImageSearchGuide/data/image4.jpg",
},
{
"caption": "Plane",
"image": "https://raw.githubusercontent.com/marqo-ai/marqo/mainline/examples/ImageSearchGuide/data/image2.jpg",
},
],
)
def get_content(res):
"""Helper to format Marqo's documents into text to be used as page_content"""
return f"{res['caption']}: {res['image']}"
docsearch = Marqo(client, index_name, page_content_builder=get_content)
query = "vehicles that fly"
doc_results = docsearch.similarity_search(query)
for doc in doc_results:
print(doc.page_content)
index_name = "langchain-byo-index-demo"
try:
client.delete_index(index_name)
except Exception:
print(f"Creating {index_name}")
client.create_index(index_name)
client.index(index_name).add_documents(
[
{
"Title": "Smartphone",
"Description": "A smartphone is a portable computer device that combines mobile telephone "
"functions and computing functions into one unit.",
},
{
"Title": "Telephone",
"Description": "A telephone is a telecommunications device that permits two or more users to"
"conduct a conversation when they are too far apart to be easily heard directly.",
},
],
)
def get_content(res):
"""Helper to format Marqo's documents into text to be used as page_content"""
if "text" in res:
return res["text"]
return res["Description"]
docsearch = Marqo(client, index_name, page_content_builder=get_content)
docsearch.add_texts(["This is a document that is about elephants"])
query = "modern communications devices"
doc_results = docsearch.similarity_search(query)
print(doc_results[0].page_content)
query = "elephants"
doc_results = docsearch.similarity_search(query, page_content_builder=get_content)
print(doc_results[0].page_content)
query = {"communications devices": 1.0}
doc_results = docsearch.similarity_search(query)
print(doc_results[0].page_content)
query = {"communications devices": 1.0, "technology post 2000": -1.0}
doc_results = docsearch.similarity_search(query)
print(doc_results[0].page_content)
import getpass
import os
from langchain.chains import RetrievalQAWithSourcesChain
from langchain_openai import OpenAI
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
with open("../../modules/state_of_the_union.txt") as f:
state_of_the_union = f.read()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-core langchain langchain-openai')
from langchain.utils.math import cosine_similarity
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
physics_template = """You are a very smart physics professor. \
You are great at answering questions about physics in a concise and easy to understand manner. \
When you don't know the answer to a question you admit that you don't know.
Here is a question:
{query}"""
math_template = """You are a very good mathematician. You are great at answering math questions. \
You are so good because you are able to break down hard problems into their component parts, \
answer the component parts, and then put them together to answer the broader question.
Here is a question:
{query}"""
embeddings = OpenAIEmbeddings()
prompt_templates = [physics_template, math_template]
prompt_embeddings = embeddings.embed_documents(prompt_templates)
def prompt_router(input):
query_embedding = embeddings.embed_query(input["query"])
similarity = cosine_similarity([query_embedding], prompt_embeddings)[0]
most_similar = prompt_templates[similarity.argmax()]
print("Using MATH" if most_similar == math_template else "Using PHYSICS")
return PromptTemplate.from_template(most_similar)
chain = (
{"query": | RunnablePassthrough() | langchain_core.runnables.RunnablePassthrough |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sagemaker')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-search-results')
import os
os.environ["OPENAI_API_KEY"] = "<ADD-KEY-HERE>"
os.environ["SERPAPI_API_KEY"] = "<ADD-KEY-HERE>"
from langchain.agents import initialize_agent, load_tools
from langchain.callbacks import SageMakerCallbackHandler
from langchain.chains import LLMChain, SimpleSequentialChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
from sagemaker.analytics import ExperimentAnalytics
from sagemaker.experiments.run import Run
from sagemaker.session import Session
HPARAMS = {
"temperature": 0.1,
"model_name": "gpt-3.5-turbo-instruct",
}
BUCKET_NAME = None
EXPERIMENT_NAME = "langchain-sagemaker-tracker"
session = Session(default_bucket=BUCKET_NAME)
RUN_NAME = "run-scenario-1"
PROMPT_TEMPLATE = "tell me a joke about {topic}"
INPUT_VARIABLES = {"topic": "fish"}
with Run(
experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session
) as run:
sagemaker_callback = SageMakerCallbackHandler(run)
llm = OpenAI(callbacks=[sagemaker_callback], **HPARAMS)
prompt = PromptTemplate.from_template(template=PROMPT_TEMPLATE)
chain = LLMChain(llm=llm, prompt=prompt, callbacks=[sagemaker_callback])
chain.run(**INPUT_VARIABLES)
sagemaker_callback.flush_tracker()
RUN_NAME = "run-scenario-2"
PROMPT_TEMPLATE_1 = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
Title: {title}
Playwright: This is a synopsis for the above play:"""
PROMPT_TEMPLATE_2 = """You are a play critic from the New York Times. Given the synopsis of play, it is your job to write a review for that play.
Play Synopsis: {synopsis}
Review from a New York Times play critic of the above play:"""
INPUT_VARIABLES = {
"input": "documentary about good video games that push the boundary of game design"
}
with Run(
experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session
) as run:
sagemaker_callback = SageMakerCallbackHandler(run)
prompt_template1 = PromptTemplate.from_template(template=PROMPT_TEMPLATE_1)
    prompt_template2 = PromptTemplate.from_template(template=PROMPT_TEMPLATE_2)
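    # Hedged completion (assumption): chain the two prompts sequentially, reusing the SageMaker callback.
    llm = OpenAI(callbacks=[sagemaker_callback], **HPARAMS)
    chain1 = LLMChain(llm=llm, prompt=prompt_template1, callbacks=[sagemaker_callback])
    chain2 = LLMChain(llm=llm, prompt=prompt_template2, callbacks=[sagemaker_callback])
    overall_chain = SimpleSequentialChain(
        chains=[chain1, chain2], callbacks=[sagemaker_callback]
    )
    overall_chain.run(**INPUT_VARIABLES)
    sagemaker_callback.flush_tracker()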
from langchain_community.llms.human import HumanInputLLM
from langchain.agents import AgentType, initialize_agent, load_tools
get_ipython().run_line_magic('pip', 'install wikipedia')
tools = load_tools(["wikipedia"])
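# Hedged sketch (assumption): a prompt_func that prints the prompt so a human can type the reply;
# the agent question is illustrative.
llm = HumanInputLLM(
    prompt_func=lambda prompt: print(
        f"\n===PROMPT====\n{prompt}\n=====END OF PROMPT======"
    )
)
agent = initialize_agent(
    tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("When was the Eiffel Tower built?")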
get_ipython().run_line_magic('pip', 'install --upgrade --quiet vald-client-python')
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Vald
from langchain_text_splitters import CharacterTextSplitter
raw_documents = TextLoader("state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(raw_documents)
embeddings = HuggingFaceEmbeddings()
db = Vald.from_documents(documents, embeddings, host="localhost", port=8080)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
docs[0].page_content
embedding_vector = embeddings.embed_query(query)
docs = db.similarity_search_by_vector(embedding_vector)
docs[0].page_content
docs_and_scores = db.similarity_search_with_score(query)
docs_and_scores[0]
retriever = db.as_retriever(search_type="mmr")
retriever.get_relevant_documents(query)
db.max_marginal_relevance_search(query, k=2, fetch_k=10)
import grpc
with open("test_root_cacert.crt", "rb") as root:
credentials = grpc.ssl_channel_credentials(root_certificates=root.read())
with open(".ztoken", "rb") as ztoken:
token = ztoken.read().strip()
metadata = [(b"athenz-role-auth", token)]
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Vald
from langchain_text_splitters import CharacterTextSplitter
raw_documents = TextLoader("state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
from langchain.chains import HypotheticalDocumentEmbedder, LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI, OpenAIEmbeddings
base_embeddings = OpenAIEmbeddings()
llm = OpenAI()
embeddings = HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, "web_search")
result = embeddings.embed_query("Where is the Taj Mahal?")
multi_llm = OpenAI(n=4, best_of=4)
embeddings = HypotheticalDocumentEmbedder.from_llm(
multi_llm, base_embeddings, "web_search"
)
result = embeddings.embed_query("Where is the Taj Mahal?")
prompt_template = """Please answer the user's question about the most recent state of the union address
Question: {question}
Answer:"""
prompt = PromptTemplate(input_variables=["question"], template=prompt_template)
llm_chain = LLMChain(llm=llm, prompt=prompt)
embeddings = HypotheticalDocumentEmbedder(
llm_chain=llm_chain, base_embeddings=base_embeddings
)
result = embeddings.embed_query(
"What did the president say about Ketanji Brown Jackson"
)
from langchain_community.vectorstores import Chroma
from langchain_text_splitters import CharacterTextSplitter
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_text(state_of_the_union)
docsearch = Chroma.from_texts(texts, embeddings)
import re
from typing import Union
from langchain.agents import (
AgentExecutor,
AgentOutputParser,
LLMSingleActionAgent,
)
from langchain.chains import LLMChain
from langchain.prompts import StringPromptTemplate
from langchain_community.agent_toolkits import NLAToolkit
from langchain_community.tools.plugin import AIPlugin
from langchain_core.agents import AgentAction, AgentFinish
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
import os
from langchain.chains import ConversationalRetrievalChain
from langchain_community.vectorstores import Vectara
from langchain_openai import OpenAI
from langchain_community.document_loaders import TextLoader
loader = TextLoader("state_of_the_union.txt")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet llama-cpp-python')
get_ipython().system('CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python')
get_ipython().system('CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install --upgrade --force-reinstall llama-cpp-python --no-cache-dir')
get_ipython().system('CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install llama-cpp-python')
get_ipython().system('CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install --upgrade --force-reinstall llama-cpp-python --no-cache-dir')
get_ipython().system('python -m pip install -e . --force-reinstall --no-cache-dir')
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import LlamaCpp
template = """Question: {question}
Answer: Let's work this out in a step by step way to be sure we have the right answer."""
prompt = PromptTemplate.from_template(template)
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
llm = LlamaCpp(
model_path="/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin",
temperature=0.75,
max_tokens=2000,
top_p=1,
callback_manager=callback_manager,
verbose=True, # Verbose is required to pass to the callback manager
)
prompt = """
Question: A rap battle between Stephen Colbert and John Oliver
"""
llm.invoke(prompt)
llm = LlamaCpp(
model_path="./ggml-model-q4_0.bin", callback_manager=callback_manager, verbose=True
)
llm_chain = LLMChain(prompt=prompt, llm=llm)
from langchain_community.chat_message_histories import SQLChatMessageHistory
chat_message_history = SQLChatMessageHistory(
session_id="test_session", connection_string="sqlite:///sqlite.db"
)
chat_message_history.add_user_message("Hello")
chat_message_history.add_ai_message("Hi")
chat_message_history.messages
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant."),
        MessagesPlaceholder(variable_name="history"),
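        # Hedged completion (assumption): finish the prompt, then wrap the chain so each
        # session's history is stored in the SQLite-backed message history shown above.
        ("human", "{question}"),
    ]
)
chain = prompt | ChatOpenAI()
chain_with_history = RunnableWithMessageHistory(
    chain,
    lambda session_id: SQLChatMessageHistory(
        session_id=session_id, connection_string="sqlite:///sqlite.db"
    ),
    input_messages_key="question",
    history_messages_key="history",
)
config = {"configurable": {"session_id": "test_session"}}
chain_with_history.invoke({"question": "Hi! I'm Bob"}, config=config)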
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lark clickhouse-connect')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
os.environ["MYSCALE_HOST"] = getpass.getpass("MyScale URL:")
os.environ["MYSCALE_PORT"] = getpass.getpass("MyScale Port:")
os.environ["MYSCALE_USERNAME"] = getpass.getpass("MyScale Username:")
os.environ["MYSCALE_PASSWORD"] = getpass.getpass("MyScale Password:")
from langchain_community.vectorstores import MyScale
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
docs = [
Document(
page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose",
metadata={"date": "1993-07-02", "rating": 7.7, "genre": ["science fiction"]},
),
Document(
page_content="Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
metadata={"date": "2010-12-30", "director": "Christopher Nolan", "rating": 8.2},
),
Document(
page_content="A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
metadata={"date": "2006-04-23", "director": "Satoshi Kon", "rating": 8.6},
),
Document(
page_content="A bunch of normal-sized women are supremely wholesome and some men pine after them",
metadata={"date": "2019-08-22", "director": "Greta Gerwig", "rating": 8.3},
),
Document(
page_content="Toys come alive and have a blast doing so",
metadata={"date": "1995-02-11", "genre": ["animated"]},
),
Document(
page_content="Three men walk into the Zone, three men walk out of the Zone",
metadata={
"date": "1979-09-10",
"director": "Andrei Tarkovsky",
"genre": ["science fiction", "adventure"],
"rating": 9.9,
},
),
]
vectorstore = MyScale.from_documents(
docs,
embeddings,
)
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_openai import OpenAI
metadata_field_info = [
AttributeInfo(
name="genre",
description="The genres of the movie",
type="list[string]",
),
AttributeInfo(
name="length(genre)",
description="The length of genres of the movie",
type="integer",
),
AttributeInfo(
name="date",
description="The date the movie was released",
type="timestamp",
),
AttributeInfo(
name="director",
description="The name of the movie director",
type="string",
),
AttributeInfo(
name="rating", description="A 1-10 rating for the movie", type="float"
),
]
document_content_description = "Brief summary of a movie"
llm = | OpenAI(temperature=0) | langchain_openai.OpenAI |
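# Hedged sketch (assumption): llm is the OpenAI instance from the line above; this builds
# the self-query retriever over the MyScale store and runs a filtered example query.
retriever = SelfQueryRetriever.from_llm(
    llm, vectorstore, document_content_description, metadata_field_info, verbose=True
)
retriever.get_relevant_documents("What are some movies rated higher than 8.5?")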
meals = [
"Beef Enchiladas with Feta cheese. Mexican-Greek fusion",
"Chicken Flatbreads with red sauce. Italian-Mexican fusion",
"Veggie sweet potato quesadillas with vegan cheese",
"One-Pan Tortelonni bake with peppers and onions",
]
from langchain_openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-instruct")
from langchain.prompts import PromptTemplate
PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}".
Embed the meal into the given text: "{text_to_personalize}".
Prepend a personalized message including the user's name "{user}"
and their preference "{preference}".
Make it sound good.
"""
PROMPT = PromptTemplate(
input_variables=["meal", "text_to_personalize", "user", "preference"],
template=PROMPT_TEMPLATE,
)
import langchain_experimental.rl_chain as rl_chain
chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs \
believe you will love it!",
)
print(response["response"])
for _ in range(5):
try:
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
except Exception as e:
print(e)
print(response["response"])
print()
scoring_criteria_template = (
"Given {preference} rank how good or bad this selection is {meal}"
)
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=rl_chain.AutoSelectionScorer(
llm=llm, scoring_criteria_template_str=scoring_criteria_template
),
)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
print(response["response"])
selection_metadata = response["selection_metadata"]
print(
f"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}"
)
class CustomSelectionScorer(rl_chain.SelectionScorer):
def score_response(
self, inputs, llm_response: str, event: rl_chain.PickBestEvent
) -> float:
print(event.based_on)
print(event.to_select_from)
selected_meal = event.to_select_from["meal"][event.selected.index]
print(f"selected meal: {selected_meal}")
if "Tom" in event.based_on["user"]:
if "Vegetarian" in event.based_on["preference"]:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 0.0
else:
return 1.0
else:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 1.0
else:
return 0.0
else:
raise NotImplementedError("I don't know how to score this user")
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=CustomSelectionScorer(),
)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
class CustomSelectionScorer(rl_chain.SelectionScorer):
def score_preference(self, preference, selected_meal):
if "Vegetarian" in preference:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 0.0
else:
return 1.0
else:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 1.0
else:
return 0.0
def score_response(
self, inputs, llm_response: str, event: rl_chain.PickBestEvent
) -> float:
selected_meal = event.to_select_from["meal"][event.selected.index]
if "Tom" in event.based_on["user"]:
return self.score_preference(event.based_on["preference"], selected_meal)
elif "Anna" in event.based_on["user"]:
return self.score_preference(event.based_on["preference"], selected_meal)
else:
raise NotImplementedError("I don't know how to score this user")
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=CustomSelectionScorer(),
metrics_step=5,
metrics_window_size=5, # rolling window average
)
random_chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=CustomSelectionScorer(),
metrics_step=5,
metrics_window_size=5, # rolling window average
policy=rl_chain.PickBestRandomPolicy, # set the random policy instead of default
)
for _ in range(20):
try:
chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
random_chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Anna"),
preference=rl_chain.BasedOn(["Loves meat", "especially beef"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
random_chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Anna"),
preference=rl_chain.BasedOn(["Loves meat", "especially beef"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
except Exception as e:
print(e)
from matplotlib import pyplot as plt
chain.metrics.to_pandas()["score"].plot(label="default learning policy")
random_chain.metrics.to_pandas()["score"].plot(label="random selection policy")
plt.legend()
print(
f"The final average score for the default policy, calculated over a rolling window, is: {chain.metrics.to_pandas()['score'].iloc[-1]}"
)
print(
f"The final average score for the random policy, calculated over a rolling window, is: {random_chain.metrics.to_pandas()['score'].iloc[-1]}"
)
from langchain.globals import set_debug
from langchain.prompts.prompt import PromptTemplate
set_debug(True)
REWARD_PROMPT_TEMPLATE = """
Given {preference} rank how good or bad this selection is {meal}
IMPORTANT: you MUST return a single number between -1 and 1, -1 being bad, 1 being good
"""
REWARD_PROMPT = PromptTemplate(
input_variables=["preference", "meal"],
template=REWARD_PROMPT_TEMPLATE,
)
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=rl_chain.AutoSelectionScorer(llm=llm, prompt=REWARD_PROMPT),
)
chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference= | rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]) | langchain_experimental.rl_chain.BasedOn |
from typing import Callable, List
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
class DialogueAgent:
def __init__(
self,
name: str,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.name = name
self.system_message = system_message
self.model = model
self.prefix = f"{self.name}: "
self.reset()
def reset(self):
self.message_history = ["Here is the conversation so far."]
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
message = self.model(
[
self.system_message,
HumanMessage(content="\n".join(self.message_history + [self.prefix])),
]
)
return message.content
def receive(self, name: str, message: str) -> None:
"""
Concatenates {message} spoken by {name} into message history
"""
self.message_history.append(f"{name}: {message}")
class DialogueSimulator:
def __init__(
self,
agents: List[DialogueAgent],
selection_function: Callable[[int, List[DialogueAgent]], int],
) -> None:
self.agents = agents
self._step = 0
self.select_next_speaker = selection_function
def reset(self):
for agent in self.agents:
agent.reset()
def inject(self, name: str, message: str):
"""
Initiates the conversation with a {message} from {name}
"""
for agent in self.agents:
agent.receive(name, message)
self._step += 1
def step(self) -> tuple[str, str]:
speaker_idx = self.select_next_speaker(self._step, self.agents)
speaker = self.agents[speaker_idx]
message = speaker.send()
for receiver in self.agents:
receiver.receive(speaker.name, message)
self._step += 1
return speaker.name, message
character_names = ["Harry Potter", "Ron Weasley", "Hermione Granger", "Argus Filch"]
storyteller_name = "Dungeon Master"
quest = "Find all of Lord Voldemort's seven horcruxes."
word_limit = 50 # word limit for task brainstorming
game_description = f"""Here is the topic for a Dungeons & Dragons game: {quest}.
The characters are: {*character_names,}.
The story is narrated by the storyteller, {storyteller_name}."""
player_descriptor_system_message = SystemMessage(
content="You can add detail to the description of a Dungeons & Dragons player."
)
def generate_character_description(character_name):
character_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(
content=f"""{game_description}
Please reply with a creative description of the character, {character_name}, in {word_limit} words or less.
Speak directly to {character_name}.
Do not add anything else."""
),
]
character_description = | ChatOpenAI(temperature=1.0) | langchain_openai.ChatOpenAI |
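# Hedged sketch (assumption): instantiating the DialogueAgent/DialogueSimulator classes
# defined above with simple round-robin turn taking; the system messages are illustrative.
def select_next_speaker(step: int, agents: List[DialogueAgent]) -> int:
    return step % len(agents)

agents = [
    DialogueAgent(
        name=name,
        system_message=SystemMessage(content=f"You are {name}. Stay in character."),
        model=ChatOpenAI(temperature=0.2),
    )
    for name in character_names
]
simulator = DialogueSimulator(agents=agents, selection_function=select_next_speaker)
simulator.reset()
simulator.inject(storyteller_name, f"The quest begins: {quest}")
for _ in range(3):
    name, message = simulator.step()
    print(f"{name}: {message}\n")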
from typing import Optional
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_experimental.autonomous_agents import BabyAGI
from langchain_openai import OpenAI, OpenAIEmbeddings
get_ipython().run_line_magic('pip', 'install faiss-cpu > /dev/null')
get_ipython().run_line_magic('pip', 'install google-search-results > /dev/null')
from langchain.docstore import InMemoryDocstore
from langchain_community.vectorstores import FAISS
embeddings_model = OpenAIEmbeddings()
import faiss
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.chains import LLMChain
from langchain_community.utilities import SerpAPIWrapper
from langchain_openai import OpenAI
todo_prompt = PromptTemplate.from_template(
"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}"
)
todo_chain = LLMChain(llm=OpenAI(temperature=0), prompt=todo_prompt)
search = SerpAPIWrapper()
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
),
Tool(
name="TODO",
func=todo_chain.run,
description="useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!",
),
]
prefix = """You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}."""
suffix = """Question: {task}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["objective", "task", "context", "agent_scratchpad"],
)
llm = | OpenAI(temperature=0) | langchain_openai.OpenAI |
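# Hedged sketch (assumption): llm is the OpenAI instance from the line above; this
# finishes the zero-shot agent and hands it to BabyAGI as the task-execution chain.
llm_chain = LLMChain(llm=llm, prompt=prompt)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
agent_executor = AgentExecutor.from_agent_and_tools(
    agent=agent, tools=tools, verbose=True
)
baby_agi = BabyAGI.from_llm(
    llm=llm,
    vectorstore=vectorstore,
    task_execution_chain=agent_executor,
    verbose=False,
    max_iterations=3,
)
baby_agi({"objective": "Write a weather report for SF today"})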
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sentence_transformers')
from langchain_community.embeddings import HuggingFaceEmbeddings
embeddings = HuggingFaceEmbeddings()
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = | TextLoader("../../modules/state_of_the_union.txt") | langchain_community.document_loaders.TextLoader |
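# Hedged sketch (assumption): loader is the TextLoader from the line above; this loads,
# splits, and embeds a sample chunk with the HuggingFaceEmbeddings instance created earlier.
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
query_result = embeddings.embed_query(docs[0].page_content)
print(len(query_result))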
from hugegraph.connection import PyHugeGraph
client = PyHugeGraph("localhost", "8080", user="admin", pwd="admin", graph="hugegraph")
"""schema"""
schema = client.schema()
schema.propertyKey("name").asText().ifNotExist().create()
schema.propertyKey("birthDate").asText().ifNotExist().create()
schema.vertexLabel("Person").properties(
"name", "birthDate"
).usePrimaryKeyId().primaryKeys("name").ifNotExist().create()
schema.vertexLabel("Movie").properties("name").usePrimaryKeyId().primaryKeys(
"name"
).ifNotExist().create()
schema.edgeLabel("ActedIn").sourceLabel("Person").targetLabel(
"Movie"
).ifNotExist().create()
"""graph"""
g = client.graph()
g.addVertex("Person", {"name": "Al Pacino", "birthDate": "1940-04-25"})
g.addVertex("Person", {"name": "Robert De Niro", "birthDate": "1943-08-17"})
g.addVertex("Movie", {"name": "The Godfather"})
g.addVertex("Movie", {"name": "The Godfather Part II"})
g.addVertex("Movie", {"name": "The Godfather Coda The Death of Michael Corleone"})
g.addEdge("ActedIn", "1:Al Pacino", "2:The Godfather", {})
g.addEdge("ActedIn", "1:Al Pacino", "2:The Godfather Part II", {})
g.addEdge(
"ActedIn", "1:Al Pacino", "2:The Godfather Coda The Death of Michael Corleone", {}
)
g.addEdge("ActedIn", "1:Robert De Niro", "2:The Godfather Part II", {})
from langchain.chains import HugeGraphQAChain
from langchain_community.graphs import HugeGraph
from langchain_openai import ChatOpenAI
graph = HugeGraph(
username="admin",
password="admin",
address="localhost",
port=8080,
graph="hugegraph",
)
print(graph.get_schema)
chain = HugeGraphQAChain.from_llm( | ChatOpenAI(temperature=0) | langchain_openai.ChatOpenAI |
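# Hedged usage sketch (assumption): completing the chain construction from the line above
# and querying the HugeGraph knowledge graph built earlier.
chain = HugeGraphQAChain.from_llm(ChatOpenAI(temperature=0), graph=graph, verbose=True)
chain.run("Who played in The Godfather Part II?")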
get_ipython().run_line_magic('pip', 'install --upgrade --quiet scikit-learn')
from langchain_community.retrievers import TFIDFRetriever
retriever = TFIDFRetriever.from_texts(["foo", "bar", "world", "hello", "foo bar"])
from langchain_core.documents import Document
retriever = TFIDFRetriever.from_documents(
[
Document(page_content="foo"),
| Document(page_content="bar") | langchain_core.documents.Document |
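# Hedged usage sketch (assumption: the from_documents call above is completed); querying
# the TF-IDF retriever for the most relevant documents.
result = retriever.get_relevant_documents("foo")
result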
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-text-splitters tiktoken')
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
from langchain_text_splitters import CharacterTextSplitter
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
chunk_size=100, chunk_overlap=0
)
texts = text_splitter.split_text(state_of_the_union)
print(texts[0])
from langchain_text_splitters import TokenTextSplitter
text_splitter = TokenTextSplitter(chunk_size=10, chunk_overlap=0)
texts = text_splitter.split_text(state_of_the_union)
print(texts[0])
get_ipython().run_line_magic('pip', 'install --upgrade --quiet spacy')
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
from langchain_text_splitters import SpacyTextSplitter
text_splitter = | SpacyTextSplitter(chunk_size=1000) | langchain_text_splitters.SpacyTextSplitter |
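# Hedged usage sketch (assumption): text_splitter is the SpacyTextSplitter from the line
# above; split the speech and inspect the first chunk.
texts = text_splitter.split_text(state_of_the_union)
print(texts[0])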
get_ipython().run_line_magic('pip', 'install -qU langchain-community langchain-openai')
from langchain_community.tools import MoveFileTool
from langchain_core.messages import HumanMessage
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-3.5-turbo")
tools = [MoveFileTool()]
functions = [convert_to_openai_function(t) for t in tools]
functions[0]
message = model.invoke(
[HumanMessage(content="move file foo to bar")], functions=functions
)
message
message.additional_kwargs["function_call"]
model_with_functions = model.bind_functions(tools)
model_with_functions.invoke([HumanMessage(content="move file foo to bar")])
model_with_tools = model.bind_tools(tools)
model_with_tools.invoke([ | HumanMessage(content="move file foo to bar") | langchain_core.messages.HumanMessage |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet atlassian-python-api')
import os
from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit
from langchain_community.utilities.jira import JiraAPIWrapper
from langchain_openai import OpenAI
os.environ["JIRA_API_TOKEN"] = "abc"
os.environ["JIRA_USERNAME"] = "123"
os.environ["JIRA_INSTANCE_URL"] = "https://jira.atlassian.com"
os.environ["OPENAI_API_KEY"] = "xyz"
llm = OpenAI(temperature=0)
jira = | JiraAPIWrapper() | langchain_community.utilities.jira.JiraAPIWrapper |
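# Hedged sketch (assumption): building the Jira toolkit from the wrapper above and driving
# it with a zero-shot agent; the project name "PROJ" is illustrative.
toolkit = JiraToolkit.from_jira_api_wrapper(jira)
agent = initialize_agent(
    toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("make a new issue in project PROJ to remind me to make more fried rice")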
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)')
get_ipython().system(' pip install "unstructured[all-docs]" pillow pydantic lxml pillow matplotlib chromadb tiktoken')
from langchain_text_splitters import CharacterTextSplitter
from unstructured.partition.pdf import partition_pdf
def extract_pdf_elements(path, fname):
"""
Extract images, tables, and chunk text from a PDF file.
path: File path, which is used to dump images (.jpg)
fname: File name
"""
return partition_pdf(
filename=path + fname,
extract_images_in_pdf=False,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
def categorize_elements(raw_pdf_elements):
"""
Categorize extracted elements from a PDF into tables and texts.
raw_pdf_elements: List of unstructured.documents.elements
"""
tables = []
texts = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
tables.append(str(element))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
texts.append(str(element))
return texts, tables
fpath = "/Users/rlm/Desktop/cj/"
fname = "cj.pdf"
raw_pdf_elements = extract_pdf_elements(fpath, fname)
texts, tables = categorize_elements(raw_pdf_elements)
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
chunk_size=4000, chunk_overlap=0
)
joined_texts = " ".join(texts)
texts_4k_token = text_splitter.split_text(joined_texts)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
def generate_text_summaries(texts, tables, summarize_texts=False):
"""
Summarize text elements
texts: List of str
tables: List of str
summarize_texts: Bool to summarize texts
"""
prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \
These summaries will be embedded and used to retrieve the raw text or table elements. \
Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
text_summaries = []
table_summaries = []
if texts and summarize_texts:
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
elif texts:
text_summaries = texts
if tables:
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
return text_summaries, table_summaries
text_summaries, table_summaries = generate_text_summaries(
texts_4k_token, tables, summarize_texts=True
)
import base64
import os
from langchain_core.messages import HumanMessage
def encode_image(image_path):
"""Getting the base64 string"""
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
def image_summarize(img_base64, prompt):
"""Make image summary"""
chat = | ChatOpenAI(model="gpt-4-vision-preview", max_tokens=1024) | langchain_openai.ChatOpenAI |
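# Hedged sketch (assumption): a possible body for image_summarize using a vision-capable
# chat model; the multimodal HumanMessage format and prompt wording are illustrative.
def image_summarize(img_base64, prompt):
    """Make image summary"""
    chat = ChatOpenAI(model="gpt-4-vision-preview", max_tokens=1024)
    msg = chat.invoke(
        [
            HumanMessage(
                content=[
                    {"type": "text", "text": prompt},
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
                    },
                ]
            )
        ]
    )
    return msg.content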
from langchain_community.document_loaders.chatgpt import ChatGPTLoader
loader = | ChatGPTLoader(log_file="./example_data/fake_conversations.json", num_logs=1) | langchain_community.document_loaders.chatgpt.ChatGPTLoader |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet flashrank')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss-cpu')
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
documents = TextLoader(
"../../modules/state_of_the_union.txt",
).load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
embedding = OpenAIEmbeddings(model="text-embedding-ada-002")
retriever = FAISS.from_documents(texts, embedding).as_retriever(search_kwargs={"k": 20})
query = "What did the president say about Ketanji Brown Jackson"
docs = retriever.get_relevant_documents(query)
pretty_print_docs(docs)
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import FlashrankRerank
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0)
compressor = FlashrankRerank()
compression_retriever = ContextualCompressionRetriever(
base_compressor=compressor, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
"What did the president say about Ketanji Jackson Brown"
)
print([doc.metadata["id"] for doc in compressed_docs])
pretty_print_docs(compressed_docs)
from langchain.chains import RetrievalQA
chain = | RetrievalQA.from_chain_type(llm=llm, retriever=compression_retriever) | langchain.chains.RetrievalQA.from_chain_type |
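# Hedged usage sketch (assumption: chain is the RetrievalQA chain from the line above,
# built on the Flashrank compression retriever).
chain.invoke({"query": "What did the president say about Ketanji Brown Jackson?"})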
from langchain.vectorstores import NeuralDBVectorStore
vectorstore = | NeuralDBVectorStore.from_scratch(thirdai_key="your-thirdai-key") | langchain.vectorstores.NeuralDBVectorStore.from_scratch |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet timescale-vector')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken')
import os
from dotenv import find_dotenv, load_dotenv
_ = load_dotenv(find_dotenv())
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
from typing import Tuple
from datetime import datetime, timedelta
from langchain.docstore.document import Document
from langchain_community.document_loaders import TextLoader
from langchain_community.document_loaders.json_loader import JSONLoader
from langchain_community.vectorstores.timescalevector import TimescaleVector
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../../extras/modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
SERVICE_URL = os.environ["TIMESCALE_SERVICE_URL"]
COLLECTION_NAME = "state_of_the_union_test"
db = TimescaleVector.from_documents(
embedding=embeddings,
documents=docs,
collection_name=COLLECTION_NAME,
service_url=SERVICE_URL,
)
query = "What did the president say about Ketanji Brown Jackson"
docs_with_score = db.similarity_search_with_score(query)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print(doc.page_content)
print("-" * 80)
retriever = db.as_retriever()
print(retriever)
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.1, model="gpt-3.5-turbo-16k")
from langchain.chains import RetrievalQA
qa_stuff = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
verbose=True,
)
query = "What did the president say about Ketanji Brown Jackson?"
response = qa_stuff.run(query)
print(response)
from timescale_vector import client
def create_uuid(date_string: str):
if date_string is None:
return None
time_format = "%a %b %d %H:%M:%S %Y %z"
datetime_obj = datetime.strptime(date_string, time_format)
uuid = client.uuid_from_time(datetime_obj)
return str(uuid)
def split_name(input_string: str) -> Tuple[str, str]:
if input_string is None:
return None, None
start = input_string.find("<")
end = input_string.find(">")
name = input_string[:start].strip()
email = input_string[start + 1 : end].strip()
return name, email
def create_date(input_string: str) -> datetime:
if input_string is None:
return None
month_dict = {
"Jan": "01",
"Feb": "02",
"Mar": "03",
"Apr": "04",
"May": "05",
"Jun": "06",
"Jul": "07",
"Aug": "08",
"Sep": "09",
"Oct": "10",
"Nov": "11",
"Dec": "12",
}
components = input_string.split()
day = components[2]
month = month_dict[components[1]]
year = components[4]
time = components[3]
timezone_offset_minutes = int(components[5]) # Convert the offset to minutes
timezone_hours = timezone_offset_minutes // 60 # Calculate the hours
timezone_minutes = timezone_offset_minutes % 60 # Calculate the remaining minutes
timestamp_tz_str = (
f"{year}-{month}-{day} {time}+{timezone_hours:02}{timezone_minutes:02}"
)
return timestamp_tz_str
def extract_metadata(record: dict, metadata: dict) -> dict:
record_name, record_email = split_name(record["author"])
metadata["id"] = create_uuid(record["date"])
metadata["date"] = create_date(record["date"])
metadata["author_name"] = record_name
metadata["author_email"] = record_email
metadata["commit_hash"] = record["commit"]
return metadata
get_ipython().system('curl -O https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json')
FILE_PATH = "../../../../../ts_git_log.json"
loader = JSONLoader(
file_path=FILE_PATH,
jq_schema=".commit_history[]",
text_content=False,
metadata_func=extract_metadata,
)
documents = loader.load()
documents = [doc for doc in documents if doc.metadata["date"] is not None]
print(documents[0])
NUM_RECORDS = 500
documents = documents[:NUM_RECORDS]
text_splitter = CharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
docs = text_splitter.split_documents(documents)
COLLECTION_NAME = "timescale_commits"
embeddings = OpenAIEmbeddings()
db = TimescaleVector.from_documents(
embedding=embeddings,
ids=[doc.metadata["id"] for doc in docs],
documents=docs,
collection_name=COLLECTION_NAME,
service_url=SERVICE_URL,
time_partition_interval=timedelta(days=7),
)
start_dt = datetime(2023, 8, 1, 22, 10, 35) # Start date = 1 August 2023, 22:10:35
end_dt = datetime(2023, 8, 30, 22, 10, 35) # End date = 30 August 2023, 22:10:35
td = timedelta(days=7) # Time delta = 7 days
query = "What's new with TimescaleDB functions?"
docs_with_score = db.similarity_search_with_score(
query, start_date=start_dt, end_date=end_dt
)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
docs_with_score = db.similarity_search_with_score(
query, start_date=start_dt, time_delta=td
)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
docs_with_score = db.similarity_search_with_score(query, end_date=end_dt, time_delta=td)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
docs_with_score = db.similarity_search_with_score(query, start_date=start_dt)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
docs_with_score = db.similarity_search_with_score(query, end_date=end_dt)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print("Date: ", doc.metadata["date"])
print(doc.page_content)
print("-" * 80)
retriever = db.as_retriever(search_kwargs={"start_date": start_dt, "end_date": end_dt})
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.1, model="gpt-3.5-turbo-16k")
from langchain.chains import RetrievalQA
qa_stuff = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
verbose=True,
)
query = (
"What's new with the timescaledb functions? Tell me when these changes were made."
)
response = qa_stuff.run(query)
print(response)
COLLECTION_NAME = "timescale_commits"
embeddings = | OpenAIEmbeddings() | langchain_openai.OpenAIEmbeddings |
from langchain_community.chat_models import ChatDatabricks
from langchain_core.messages import HumanMessage
from mlflow.deployments import get_deploy_client
client = get_deploy_client("databricks")
secret = "secrets/<scope>/openai-api-key" # replace `<scope>` with your scope
name = "my-chat" # rename this if my-chat already exists
client.create_endpoint(
name=name,
config={
"served_entities": [
{
"name": "my-chat",
"external_model": {
"name": "gpt-4",
"provider": "openai",
"task": "llm/v1/chat",
"openai_config": {
"openai_api_key": "{{" + secret + "}}",
},
},
}
],
},
)
chat = ChatDatabricks(
target_uri="databricks",
endpoint=name,
temperature=0.1,
)
chat([HumanMessage(content="hello")])
from langchain_community.embeddings import DatabricksEmbeddings
embeddings = DatabricksEmbeddings(endpoint="databricks-bge-large-en")
embeddings.embed_query("hello")[:3]
from langchain_community.llms import Databricks
llm = | Databricks(endpoint_name="dolly") | langchain_community.llms.Databricks |
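# Hedged usage sketch (assumption): llm wraps the "dolly" serving endpoint from the line
# above and can be called like any other LangChain LLM.
llm("How are you?")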
get_ipython().system(' pip install langchain unstructured[all-docs] pydantic lxml langchainhub')
get_ipython().system(' brew install tesseract')
get_ipython().system(' brew install poppler')
path = "/Users/rlm/Desktop/Papers/LLaMA2/"
from typing import Any
from pydantic import BaseModel
from unstructured.partition.pdf import partition_pdf
raw_pdf_elements = partition_pdf(
filename=path + "LLaMA2.pdf",
extract_images_in_pdf=False,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
category_counts = {}
for element in raw_pdf_elements:
category = str(type(element))
if category in category_counts:
category_counts[category] += 1
else:
category_counts[category] = 1
unique_categories = set(category_counts.keys())
category_counts
class Element(BaseModel):
type: str
text: Any
categorized_elements = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
categorized_elements.append(Element(type="table", text=str(element)))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
categorized_elements.append(Element(type="text", text=str(element)))
table_elements = [e for e in categorized_elements if e.type == "table"]
print(len(table_elements))
text_elements = [e for e in categorized_elements if e.type == "text"]
print(len(text_elements))
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
prompt_text = """You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
tables = [i.text for i in table_elements]
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
texts = [i.text for i in text_elements]
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
import uuid
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
vectorstore = Chroma(collection_name="summaries", embedding_function=OpenAIEmbeddings())
store = InMemoryStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
doc_ids = [str(uuid.uuid4()) for _ in texts]
summary_texts = [
Document(page_content=s, metadata={id_key: doc_ids[i]})
for i, s in enumerate(text_summaries)
]
retriever.vectorstore.add_documents(summary_texts)
retriever.docstore.mset(list(zip(doc_ids, texts)))
table_ids = [str(uuid.uuid4()) for _ in tables]
summary_tables = [
Document(page_content=s, metadata={id_key: table_ids[i]})
for i, s in enumerate(table_summaries)
]
retriever.vectorstore.add_documents(summary_tables)
retriever.docstore.mset(list(zip(table_ids, tables)))
from langchain_core.runnables import RunnablePassthrough
template = """Answer the question based only on the following context, which can include text and tables:
{context}
Question: {question}
"""
prompt = | ChatPromptTemplate.from_template(template) | langchain_core.prompts.ChatPromptTemplate.from_template |
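# Hedged sketch (assumption): prompt is the template from the line above; this composes
# the multi-vector RAG chain and asks a question about the indexed paper.
chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)
chain.invoke("What is the number of training tokens for LLaMA2?")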
get_ipython().run_line_magic('pip', 'install --upgrade --quiet llmlingua accelerate')
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
documents = TextLoader(
"../../modules/state_of_the_union.txt",
).load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
embedding = OpenAIEmbeddings(model="text-embedding-ada-002")
retriever = FAISS.from_documents(texts, embedding).as_retriever(search_kwargs={"k": 20})
query = "What did the president say about Ketanji Brown Jackson"
docs = retriever.get_relevant_documents(query)
pretty_print_docs(docs)
from langchain.retrievers import ContextualCompressionRetriever
from langchain_community.retrievers.document_compressors import LLMLinguaCompressor
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0)
compressor = | LLMLinguaCompressor(model_name="openai-community/gpt2", device_map="cpu") | langchain_community.retrievers.document_compressors.LLMLinguaCompressor |
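# Hedged sketch (assumption): compressor is the LLMLinguaCompressor from the line above;
# this wraps the FAISS retriever and inspects the compressed results.
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)
compressed_docs = compression_retriever.get_relevant_documents(
    "What did the president say about Ketanji Jackson Brown"
)
pretty_print_docs(compressed_docs)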
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-community')
import os
os.environ["YDC_API_KEY"] = ""
os.environ["OPENAI_API_KEY"] = ""
from langchain_community.utilities.you import YouSearchAPIWrapper
utility = YouSearchAPIWrapper(num_web_results=1)
utility
import json
response = utility.raw_results(query="What is the weather in NY")
hits = response["hits"]
print(len(hits))
print(json.dumps(hits, indent=2))
response = utility.results(query="What is the weather in NY")
print(len(response))
print(response)
from langchain_community.retrievers.you import YouRetriever
retriever = | YouRetriever(num_web_results=1) | langchain_community.retrievers.you.YouRetriever |
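# Hedged usage sketch (assumption): retriever is the YouRetriever from the line above.
documents = retriever.get_relevant_documents("What is the weather in NY")
print(len(documents))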
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-api-python-client > /dev/null')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-auth-oauthlib > /dev/null')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-auth-httplib2 > /dev/null')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet beautifulsoup4 > /dev/null # This is optional but is useful for parsing HTML messages')
from langchain_community.agent_toolkits import GmailToolkit
toolkit = GmailToolkit()
from langchain_community.tools.gmail.utils import (
build_resource_service,
get_gmail_credentials,
)
credentials = get_gmail_credentials(
token_file="token.json",
scopes=["https://mail.google.com/"],
client_secrets_file="credentials.json",
)
api_resource = build_resource_service(credentials=credentials)
toolkit = GmailToolkit(api_resource=api_resource)
tools = toolkit.get_tools()
tools
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain import hub
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_openai import ChatOpenAI
instructions = """You are an assistant."""
base_prompt = | hub.pull("langchain-ai/openai-functions-template") | langchain.hub.pull |
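# Hedged sketch (assumption): base_prompt is the hub template from the line above; this
# finishes the OpenAI-functions agent over the Gmail tools. The draft request is illustrative.
prompt = base_prompt.partial(instructions=instructions)
llm = ChatOpenAI(temperature=0)
agent = create_openai_functions_agent(llm, toolkit.get_tools(), prompt)
agent_executor = AgentExecutor(agent=agent, tools=toolkit.get_tools(), verbose=False)
agent_executor.invoke(
    {"input": "Create a gmail draft for me to edit of a letter for a potential customer"}
)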
get_ipython().run_line_magic('pip', 'install --upgrade --quiet llmlingua accelerate')
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
documents = TextLoader(
"../../modules/state_of_the_union.txt",
).load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
embedding = | OpenAIEmbeddings(model="text-embedding-ada-002") | langchain_openai.OpenAIEmbeddings |
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)')
get_ipython().system(' pip install "unstructured[all-docs]==0.10.19" pillow pydantic lxml pillow matplotlib tiktoken open_clip_torch torch')
path = "/Users/rlm/Desktop/photos/"
from unstructured.partition.pdf import partition_pdf
raw_pdf_elements = partition_pdf(
filename=path + "photos.pdf",
extract_images_in_pdf=True,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
tables = []
texts = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
tables.append(str(element))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
texts.append(str(element))
import os
import uuid
import chromadb
import numpy as np
from langchain_community.vectorstores import Chroma
from langchain_experimental.open_clip import OpenCLIPEmbeddings
from PIL import Image as _PILImage
vectorstore = Chroma(
collection_name="mm_rag_clip_photos", embedding_function= | OpenCLIPEmbeddings() | langchain_experimental.open_clip.OpenCLIPEmbeddings |
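# Hedged sketch (assumption: the Chroma construction above is completed with
# OpenCLIPEmbeddings); index the extracted images and text chunks, then expose a retriever.
image_uris = sorted(
    [
        os.path.join(path, image_name)
        for image_name in os.listdir(path)
        if image_name.endswith(".jpg")
    ]
)
vectorstore.add_images(uris=image_uris)
vectorstore.add_texts(texts=texts)
retriever = vectorstore.as_retriever()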
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lark')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet chromadb')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
docs = [
Document(
page_content="A bunch of scientists bring back dinosaurs and mayhem breaks loose",
metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"},
),
Document(
page_content="Leo DiCaprio gets lost in a dream within a dream within a dream within a ...",
metadata={"year": 2010, "director": "Christopher Nolan", "rating": 8.2},
),
Document(
page_content="A psychologist / detective gets lost in a series of dreams within dreams within dreams and Inception reused the idea",
metadata={"year": 2006, "director": "Satoshi Kon", "rating": 8.6},
),
Document(
page_content="A bunch of normal-sized women are supremely wholesome and some men pine after them",
metadata={"year": 2019, "director": "Greta Gerwig", "rating": 8.3},
),
Document(
page_content="Toys come alive and have a blast doing so",
metadata={"year": 1995, "genre": "animated"},
),
Document(
page_content="Three men walk into the Zone, three men walk out of the Zone",
metadata={
"year": 1979,
"director": "Andrei Tarkovsky",
"genre": "science fiction",
"rating": 9.9,
},
),
]
vectorstore = Chroma.from_documents(docs, embeddings)
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_openai import OpenAI
metadata_field_info = [
AttributeInfo(
name="genre",
description="The genre of the movie",
type="string or list[string]",
),
AttributeInfo(
name="year",
description="The year the movie was released",
type="integer",
),
AttributeInfo(
name="director",
description="The name of the movie director",
type="string",
),
AttributeInfo(
name="rating", description="A 1-10 rating for the movie", type="float"
),
]
document_content_description = "Brief summary of a movie"
llm = | OpenAI(temperature=0) | langchain_openai.OpenAI |
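# Hedged sketch (assumption): llm is the OpenAI instance from the line above; build the
# self-query retriever over the Chroma store and run a filtered query.
retriever = SelfQueryRetriever.from_llm(
    llm, vectorstore, document_content_description, metadata_field_info, verbose=True
)
retriever.get_relevant_documents("I want to watch a movie rated higher than 8.5")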
get_ipython().run_line_magic('pip', "install --upgrade --quiet langchain-openai 'deeplake[enterprise]' tiktoken")
from langchain_community.vectorstores import DeepLake
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
activeloop_token = getpass.getpass("activeloop token:")
embeddings = OpenAIEmbeddings()
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = | CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) | langchain_text_splitters.CharacterTextSplitter |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lm-format-enforcer > /dev/null')
import logging
from langchain_experimental.pydantic_v1 import BaseModel
logging.basicConfig(level=logging.ERROR)
class PlayerInformation(BaseModel):
first_name: str
last_name: str
num_seasons_in_nba: int
year_of_birth: int
import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
model_id = "meta-llama/Llama-2-7b-chat-hf"
device = "cuda"
if torch.cuda.is_available():
config = AutoConfig.from_pretrained(model_id)
config.pretraining_tp = 1
model = AutoModelForCausalLM.from_pretrained(
model_id,
config=config,
torch_dtype=torch.float16,
load_in_8bit=True,
device_map="auto",
)
else:
raise Exception("GPU not available")
tokenizer = AutoTokenizer.from_pretrained(model_id)
if tokenizer.pad_token_id is None:
tokenizer.pad_token_id = tokenizer.eos_token_id
DEFAULT_SYSTEM_PROMPT = """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\
"""
prompt = """Please give me information about {player_name}. You must respond using JSON format, according to the following schema:
{arg_schema}
"""
def make_instruction_prompt(message):
return f"[INST] <<SYS>>\n{DEFAULT_SYSTEM_PROMPT}\n<</SYS>> {message} [/INST]"
def get_prompt(player_name):
return make_instruction_prompt(
prompt.format(
player_name=player_name, arg_schema=PlayerInformation.schema_json()
)
)
from langchain_community.llms import HuggingFacePipeline
from transformers import pipeline
hf_model = pipeline(
"text-generation", model=model, tokenizer=tokenizer, max_new_tokens=200
)
original_model = | HuggingFacePipeline(pipeline=hf_model) | langchain_community.llms.HuggingFacePipeline |
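# Hedged usage sketch (assumption): original_model is the unconstrained pipeline from the
# line above; the JSON-schema-enforced variant would wrap the same pipeline separately.
generated = original_model.predict(get_prompt("Michael Jordan"))
print(generated)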
from langchain_community.llms.azureml_endpoint import (
    AzureMLEndpointApiType,
    AzureMLOnlineEndpoint,
    LlamaContentFormatter,
)
from langchain_core.messages import HumanMessage
llm = AzureMLOnlineEndpoint(
endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/score",
endpoint_api_type=AzureMLEndpointApiType.realtime,
endpoint_api_key="my-api-key",
content_formatter=LlamaContentFormatter(),
model_kwargs={"temperature": 0.8, "max_new_tokens": 400},
)
response = llm.invoke("Write me a song about sparkling water:")
response
response = llm.invoke("Write me a song about sparkling water:", temperature=0.5)
response
from langchain_community.llms.azureml_endpoint import (
AzureMLEndpointApiType,
LlamaContentFormatter,
)
from langchain_core.messages import HumanMessage
llm = AzureMLOnlineEndpoint(
endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/v1/completions",
endpoint_api_type=AzureMLEndpointApiType.serverless,
endpoint_api_key="my-api-key",
content_formatter=LlamaContentFormatter(),
model_kwargs={"temperature": 0.8, "max_new_tokens": 400},
)
response = llm.invoke("Write me a song about sparkling water:")
response
import json
import os
from typing import Dict
from langchain_community.llms.azureml_endpoint import (
AzureMLOnlineEndpoint,
ContentFormatterBase,
)
class CustomFormatter(ContentFormatterBase):
content_type = "application/json"
accepts = "application/json"
def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
input_str = json.dumps(
{
"inputs": [prompt],
"parameters": model_kwargs,
"options": {"use_cache": False, "wait_for_model": True},
}
)
return str.encode(input_str)
def format_response_payload(self, output: bytes) -> str:
response_json = json.loads(output)
return response_json[0]["summary_text"]
content_formatter = CustomFormatter()
llm = AzureMLOnlineEndpoint(
endpoint_api_type="realtime",
endpoint_api_key=os.getenv("BART_ENDPOINT_API_KEY"),
endpoint_url=os.getenv("BART_ENDPOINT_URL"),
model_kwargs={"temperature": 0.8, "max_new_tokens": 400},
content_formatter=content_formatter,
)
large_text = """On January 7, 2020, Blockberry Creative announced that HaSeul would not participate in the promotion for Loona's
next album because of mental health concerns. She was said to be diagnosed with "intermittent anxiety symptoms" and would be
taking time to focus on her health.[39] On February 5, 2020, Loona released their second EP titled [#] (read as hash), along
with the title track "So What".[40] Although HaSeul did not appear in the title track, her vocals are featured on three other
songs on the album, including "365". Once peaked at number 1 on the daily Gaon Retail Album Chart,[41] the EP then debuted at
number 2 on the weekly Gaon Album Chart. On March 12, 2020, Loona won their first music show trophy with "So What" on Mnet's
M Countdown.[42]
On October 19, 2020, Loona released their third EP titled [12:00] (read as midnight),[43] accompanied by its first single
"Why Not?". HaSeul was again not involved in the album, out of her own decision to focus on the recovery of her health.[44]
The EP then became their first album to enter the Billboard 200, debuting at number 112.[45] On November 18, Loona released
the music video for "Star", another song on [12:00].[46] Peaking at number 40, "Star" is Loona's first entry on the Billboard
Mainstream Top 40, making them the second K-pop girl group to enter the chart.[47]
On June 1, 2021, Loona announced that they would be having a comeback on June 28, with their fourth EP, [&] (read as and).
[48] The following day, on June 2, a teaser was posted to Loona's official social media accounts showing twelve sets of eyes,
confirming the return of member HaSeul who had been on hiatus since early 2020.[49] On June 12, group members YeoJin, Kim Lip,
Choerry, and Go Won released the song "Yum-Yum" as a collaboration with Cocomong.[50] On September 8, they released another
collaboration song named "Yummy-Yummy".[51] On June 27, 2021, Loona announced at the end of their special clip that they are
making their Japanese debut on September 15 under Universal Music Japan sublabel EMI Records.[52] On August 27, it was announced
that Loona will release the double A-side single, "Hula Hoop / Star Seed" on September 15, with a physical CD release on October
20.[53] In December, Chuu filed an injunction to suspend her exclusive contract with Blockberry Creative.[54][55]
"""
summarized_text = llm.invoke(large_text)
print(summarized_text)
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms.azureml_endpoint import DollyContentFormatter
formatter_template = "Write a {word_count} word essay about {topic}."
prompt = PromptTemplate(
input_variables=["word_count", "topic"], template=formatter_template
)
content_formatter = DollyContentFormatter()
llm = AzureMLOnlineEndpoint(
endpoint_api_key=os.getenv("DOLLY_ENDPOINT_API_KEY"),
endpoint_url=os.getenv("DOLLY_ENDPOINT_URL"),
model_kwargs={"temperature": 0.8, "max_tokens": 300},
content_formatter=content_formatter,
)
chain = | LLMChain(llm=llm, prompt=prompt) | langchain.chains.LLMChain |
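# Hedged usage sketch (assumption): chain is the LLMChain from the line above, pointed at
# the Dolly endpoint.
print(chain.invoke({"word_count": 100, "topic": "how to make friends"}))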
from langchain_community.embeddings.fake import FakeEmbeddings
from langchain_community.vectorstores import Tair
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = | FakeEmbeddings(size=128) | langchain_community.embeddings.fake.FakeEmbeddings |
get_ipython().system(' nomic login')
get_ipython().system(' nomic login token')
get_ipython().system(' pip install -U langchain-nomic langchain_community tiktoken langchain-openai chromadb langchain')
import os
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_API_KEY"] = "api_key"
from langchain_community.document_loaders import WebBaseLoader
urls = [
"https://lilianweng.github.io/posts/2023-06-23-agent/",
"https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/",
"https://lilianweng.github.io/posts/2023-10-25-adv-attack-llm/",
]
docs = [WebBaseLoader(url).load() for url in urls]
docs_list = [item for sublist in docs for item in sublist]
from langchain_text_splitters import CharacterTextSplitter
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
chunk_size=7500, chunk_overlap=100
)
doc_splits = text_splitter.split_documents(docs_list)
import tiktoken
encoding = tiktoken.get_encoding("cl100k_base")
encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
for d in doc_splits:
print("The document is %s tokens" % len(encoding.encode(d.page_content)))
import os
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_nomic import NomicEmbeddings
from langchain_nomic.embeddings import NomicEmbeddings
vectorstore = Chroma.from_documents(
documents=doc_splits,
collection_name="rag-chroma",
embedding=NomicEmbeddings(model="nomic-embed-text-v1"),
)
retriever = vectorstore.as_retriever()
from langchain_community.chat_models import ChatOllama
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = | ChatPromptTemplate.from_template(template) | langchain_core.prompts.ChatPromptTemplate.from_template |
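# Hedged sketch (assumption): prompt is the template from the line above; compose the RAG
# chain over the Nomic-embedded retriever. A local ChatOllama model could be swapped in.
model = ChatOpenAI(temperature=0, model="gpt-3.5-turbo")
chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)
chain.invoke("What are the types of agent memory?")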
import json
from pprint import pprint
from langchain.globals import set_debug
from langchain_community.llms import NIBittensorLLM
set_debug(True)
llm_sys = NIBittensorLLM(
system_prompt="Your task is to determine response based on user prompt.Explain me like I am technical lead of a project"
)
sys_resp = llm_sys(
"What is bittensor and What are the potential benefits of decentralized AI?"
)
print(f"Response provided by LLM with system prompt set is : {sys_resp}")
""" {
"choices": [
{"index": Bittensor's Metagraph index number,
"uid": Unique Identifier of a miner,
"responder_hotkey": Hotkey of a miner,
"message":{"role":"assistant","content": Contains actual response},
"response_ms": Time in millisecond required to fetch response from a miner}
]
} """
multi_response_llm = NIBittensorLLM(top_responses=10)
multi_resp = multi_response_llm("What is Neural Network Feeding Mechanism?")
json_multi_resp = json.loads(multi_resp)
pprint(json_multi_resp)
from langchain.chains import LLMChain
from langchain.globals import set_debug
from langchain.prompts import PromptTemplate
from langchain_community.llms import NIBittensorLLM
set_debug(True)
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm = NIBittensorLLM(
system_prompt="Your task is to determine response based on user prompt."
)
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What is bittensor?"
llm_chain.run(question)
from langchain.tools import Tool
from langchain_community.utilities import GoogleSearchAPIWrapper
search = GoogleSearchAPIWrapper()
tool = Tool(
name="Google Search",
description="Search Google for recent results.",
func=search.run,
)
from langchain.agents import (
AgentExecutor,
ZeroShotAgent,
)
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_community.llms import NIBittensorLLM
memory = ConversationBufferMemory(memory_key="chat_history")
tools = [tool]
prefix = """Answer prompt based on LLM if there is need to search something then use internet and observe internet result and give accurate reply of user questions also try to use authenticated sources"""
suffix = """Begin!
{chat_history}
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools=tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"],
)
llm = NIBittensorLLM(
system_prompt="Your task is to determine a response based on user prompt"
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
memory = | ConversationBufferMemory(memory_key="chat_history") | langchain.memory.ConversationBufferMemory |
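# Hedged sketch (assumption): memory is the ConversationBufferMemory from the line above;
# finish the conversational agent and ask a question that exercises the search tool.
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
agent_chain = AgentExecutor.from_agent_and_tools(
    agent=agent, tools=tools, verbose=True, memory=memory
)
agent_chain.run(input="Who is the current president of the United States?")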
from langchain.retrievers import ParentDocumentRetriever
from langchain.storage import InMemoryStore
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loaders = [
TextLoader("../../paul_graham_essay.txt"),
TextLoader("../../state_of_the_union.txt"),
]
docs = []
for loader in loaders:
docs.extend(loader.load())
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
vectorstore = Chroma(
collection_name="full_documents", embedding_function=OpenAIEmbeddings()
)
store = | InMemoryStore() | langchain.storage.InMemoryStore |
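# Hedged sketch (assumption): store is the InMemoryStore from the line above; wire up the
# parent-document retriever, index the loaded documents, and run a query.
retriever = ParentDocumentRetriever(
    vectorstore=vectorstore,
    docstore=store,
    child_splitter=child_splitter,
)
retriever.add_documents(docs, ids=None)
retrieved_docs = retriever.get_relevant_documents("justice breyer")
print(retrieved_docs[0].page_content[:200])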
get_ipython().system('poetry run pip install dgml-utils==0.3.0 --upgrade --quiet')
import os
from langchain_community.document_loaders import DocugamiLoader
DOCUGAMI_API_KEY = os.environ.get("DOCUGAMI_API_KEY")
docset_id = "26xpy3aes7xp"
document_ids = ["d7jqdzcj50sj", "cgd1eacfkchw"]
loader = DocugamiLoader(docset_id=docset_id, document_ids=document_ids)
chunks = loader.load()
len(chunks)
loader.min_text_length = 64
loader.include_xml_tags = True
chunks = loader.load()
for chunk in chunks[:5]:
print(chunk)
get_ipython().system('poetry run pip install --upgrade langchain-openai tiktoken chromadb hnswlib')
loader = DocugamiLoader(docset_id="zo954yqy53wp")
chunks = loader.load()
for chunk in chunks:
stripped_metadata = chunk.metadata.copy()
for key in chunk.metadata:
if key not in ["name", "xpath", "id", "structure"]:
del stripped_metadata[key]
chunk.metadata = stripped_metadata
print(len(chunks))
from langchain.chains import RetrievalQA
from langchain_community.vectorstores.chroma import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
embedding = OpenAIEmbeddings()
vectordb = Chroma.from_documents(documents=chunks, embedding=embedding)
retriever = vectordb.as_retriever()
qa_chain = RetrievalQA.from_chain_type(
llm=OpenAI(), chain_type="stuff", retriever=retriever, return_source_documents=True
)
qa_chain("What can tenants do with signage on their properties?")
chain_response = qa_chain("What is rentable area for the property owned by DHA Group?")
chain_response["result"] # correct answer should be 13,500 sq ft
chain_response["source_documents"]
loader = DocugamiLoader(docset_id="zo954yqy53wp")
loader.include_xml_tags = (
True # for additional semantics from the Docugami knowledge graph
)
chunks = loader.load()
print(chunks[0].metadata)
get_ipython().system('poetry run pip install --upgrade lark --quiet')
from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_community.vectorstores.chroma import Chroma
EXCLUDE_KEYS = ["id", "xpath", "structure"]
metadata_field_info = [
AttributeInfo(
name=key,
description=f"The {key} for this chunk",
type="string",
)
for key in chunks[0].metadata
if key.lower() not in EXCLUDE_KEYS
]
document_content_description = "Contents of this chunk"
llm = OpenAI(temperature=0)
vectordb = Chroma.from_documents(documents=chunks, embedding=embedding)
get_ipython().system(' pip install -U langchain openai chromadb langchain-experimental # (newest versions required for multi-modal)')
get_ipython().system(' pip install "unstructured[all-docs]==0.10.19" pillow pydantic lxml pillow matplotlib tiktoken open_clip_torch torch')
path = "/Users/rlm/Desktop/cpi/"
from langchain_community.document_loaders import PyPDFLoader
loader = PyPDFLoader(path + "cpi.pdf")
pdf_pages = loader.load()
from langchain_text_splitters import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits_pypdf = text_splitter.split_documents(pdf_pages)
all_splits_pypdf_texts = [d.page_content for d in all_splits_pypdf]
from unstructured.partition.pdf import partition_pdf
raw_pdf_elements = partition_pdf(
filename=path + "cpi.pdf",
extract_images_in_pdf=True,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
tables = []
texts = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
tables.append(str(element))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
texts.append(str(element))
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
baseline = Chroma.from_texts(
texts=all_splits_pypdf_texts,
collection_name="baseline",
embedding=OpenAIEmbeddings(),
)
retriever_baseline = baseline.as_retriever()
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
prompt_text = """You are an assistant tasked with summarizing tables and text for retrieval. \
These summaries will be embedded and used to retrieve the raw text or table elements. \
Give a concise summary of the table or text that is well optimized for retrieval. Table or text: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOpenAI(temperature=0, model="gpt-4")
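# Sketch (assumed continuation): reuse the summarization prompt to batch-summarize the
# extracted text and table elements; these summaries are what get embedded for retrieval.
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})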
get_ipython().run_line_magic('pip', 'install --upgrade --quiet "xinference[all]"')
get_ipython().system('xinference launch -n vicuna-v1.3 -f ggmlv3 -q q4_0')
from langchain_community.llms import Xinference
llm = Xinference(
server_url="http://0.0.0.0:9997", model_uid="7167b2b0-2a04-11ee-83f0-d29396a3f064"
)
llm(
prompt="Q: where can we visit in the capital of France? A:",
generate_config={"max_tokens": 1024, "stream": True},
)
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
template = "Where can we visit in the capital of {country}?"
prompt = PromptTemplate.from_template(template)
llm_chain = LLMChain(prompt=prompt, llm=llm)
from typing import List
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field, validator
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0)
class Joke(BaseModel):
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
@validator("setup")
def question_ends_with_question_mark(cls, field):
if field[-1] != "?":
raise ValueError("Badly formed question!")
return field
joke_query = "Tell me a joke."
parser = PydanticOutputParser(pydantic_object=Joke)
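# Sketch (assumed continuation): inject the parser's format instructions into a prompt and run
# query -> model -> parser, mirroring the JsonOutputParser example later in this document.
prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
chain = prompt | model | parser
chain.invoke({"query": joke_query})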
get_ipython().run_line_magic('pip', 'install --upgrade --quiet neo4j')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain.docstore.document import Document
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Neo4jVector
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
url = "bolt://localhost:7687"
username = "neo4j"
password = "pleaseletmein"
db = Neo4jVector.from_documents(
docs, OpenAIEmbeddings(), url=url, username=username, password=password
)
query = "What did the president say about Ketanji Brown Jackson"
docs_with_score = db.similarity_search_with_score(query, k=2)
for doc, score in docs_with_score:
print("-" * 80)
print("Score: ", score)
print(doc.page_content)
print("-" * 80)
index_name = "vector" # default index name
store = Neo4jVector.from_existing_index(
OpenAIEmbeddings(),
url=url,
username=username,
password=password,
index_name=index_name,
)
store.query("CREATE (p:Person {name: 'Tomaz', location:'Slovenia', hobby:'Bicycle'})")
existing_graph = Neo4jVector.from_existing_graph(
embedding=OpenAIEmbeddings(),
url=url,
username=username,
password=password,
index_name="person_index",
node_label="Person",
text_node_properties=["name", "location"],
embedding_node_property="embedding",
)
result = existing_graph.similarity_search("Slovenia", k=1)
result[0]
store.add_documents([Document(page_content="foo")])
from typing import List
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0)
class Joke(BaseModel):
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
joke_query = "Tell me a joke."
parser = JsonOutputParser(pydantic_object=Joke)
prompt = PromptTemplate(
template="Answer the user query.\n{format_instructions}\n{query}\n",
input_variables=["query"],
partial_variables={"format_instructions": parser.get_format_instructions()},
)
chain = prompt | model | parser
chain.invoke({"query": joke_query})
for s in chain.stream({"query": joke_query}):
print(s)
joke_query = "Tell me a joke."
parser = JsonOutputParser()
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_message_histories import RedisChatMessageHistory
from langchain_community.utilities import GoogleSearchAPIWrapper
from langchain_openai import OpenAI
search = GoogleSearchAPIWrapper()
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
)
]
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"
{chat_history}
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"],
)
message_history = RedisChatMessageHistory(
url="redis://localhost:6379/0", ttl=600, session_id="my-session"
)
memory = ConversationBufferMemory(
memory_key="chat_history", chat_memory=message_history
)
llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
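# Sketch (assumed continuation): finish the agent setup so the Redis-backed chat history is
# actually used; the example question is an assumption, not part of the original cell.
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
agent_chain = AgentExecutor.from_agent_and_tools(
    agent=agent, tools=tools, verbose=True, memory=memory
)
agent_chain.run(input="How many people live in Canada?")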
import pprint
from langchain_community.utilities import SearxSearchWrapper
search = SearxSearchWrapper(searx_host="http://127.0.0.1:8888")
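# Hedged usage sketch: run a plain query, then fetch structured results with metadata.
print(search.run("What is the capital of France"))
results = search.results("large language models", num_results=5)
pprint.pprint(results)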
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"Write out the following equation using algebraic symbols then solve it. Use the format\n\nEQUATION:...\nSOLUTION:...\n\n",
),
("human", "{equation_statement}"),
]
)
model = ChatOpenAI(temperature=0)
runnable = (
{"equation_statement": RunnablePassthrough()} | prompt | model | StrOutputParser()
)
print(runnable.invoke("x raised to the third plus seven equals 12"))
runnable = (
{"equation_statement": RunnablePassthrough()}
| prompt
| model.bind(stop="SOLUTION")
| StrOutputParser()
)
print(runnable.invoke("x raised to the third plus seven equals 12"))
function = {
"name": "solver",
"description": "Formulates and solves an equation",
"parameters": {
"type": "object",
"properties": {
"equation": {
"type": "string",
"description": "The algebraic expression of the equation",
},
"solution": {
"type": "string",
"description": "The solution to the equation",
},
},
"required": ["equation", "solution"],
},
}
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"Write out the following equation using algebraic symbols then solve it.",
),
("human", "{equation_statement}"),
]
)
model = ChatOpenAI(model="gpt-4", temperature=0).bind(
function_call={"name": "solver"}, functions=[function]
)
runnable = {"equation_statement": RunnablePassthrough()} | prompt | model
from typing import List
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0)
class Joke(BaseModel):
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
joke_query = "Tell me a joke."
parser = JsonOutputParser(pydantic_object=Joke)
meals = [
"Beef Enchiladas with Feta cheese. Mexican-Greek fusion",
"Chicken Flatbreads with red sauce. Italian-Mexican fusion",
"Veggie sweet potato quesadillas with vegan cheese",
"One-Pan Tortelonni bake with peppers and onions",
]
from langchain_openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-instruct")
from langchain.prompts import PromptTemplate
PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}".
Embed the meal into the given text: "{text_to_personalize}".
Prepend a personalized message including the user's name "{user}"
and their preference "{preference}".
Make it sound good.
"""
PROMPT = PromptTemplate(
input_variables=["meal", "text_to_personalize", "user", "preference"],
template=PROMPT_TEMPLATE,
)
import langchain_experimental.rl_chain as rl_chain
chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
    user=rl_chain.BasedOn("Tom"),
    preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
    text_to_personalize="This is the weeks specialty dish, our master chefs \
believe you will love it!",
)
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
llm = OpenAI(temperature=0)
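# Sketch of the typical continuation (file path and chunk size are assumptions): load and
# split a document, index it in Chroma, then build a RetrievalQA chain over the retriever.
from langchain_community.document_loaders import TextLoader

documents = TextLoader("state_of_the_union.txt").load()
texts = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(documents)
docsearch = Chroma.from_documents(texts, OpenAIEmbeddings())
qa = RetrievalQA.from_chain_type(
    llm=llm, chain_type="stuff", retriever=docsearch.as_retriever()
)
qa.run("What did the president say about Ketanji Brown Jackson?")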
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-elasticsearch langchain-openai tiktoken langchain')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_elasticsearch import ElasticsearchStore
from langchain_openai import OpenAIEmbeddings
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = ElasticsearchStore.from_documents(
docs,
embeddings,
es_url="http://localhost:9200",
index_name="test-basic",
)
db.client.indices.refresh(index="test-basic")
query = "What did the president say about Ketanji Brown Jackson"
results = db.similarity_search(query)
print(results)
for i, doc in enumerate(docs):
doc.metadata["date"] = f"{range(2010, 2020)[i % 10]}-01-01"
doc.metadata["rating"] = range(1, 6)[i % 5]
doc.metadata["author"] = ["John Doe", "Jane Doe"][i % 2]
db = ElasticsearchStore.from_documents(
docs, embeddings, es_url="http://localhost:9200", index_name="test-metadata"
)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].metadata)
docs = db.similarity_search(
query, filter=[{"term": {"metadata.author.keyword": "John Doe"}}]
)
print(docs[0].metadata)
docs = db.similarity_search(
query,
filter=[{"match": {"metadata.author": {"query": "Jon", "fuzziness": "AUTO"}}}],
)
print(docs[0].metadata)
docs = db.similarity_search(
"Any mention about Fred?",
filter=[{"range": {"metadata.date": {"gte": "2010-01-01"}}}],
)
print(docs[0].metadata)
docs = db.similarity_search(
"Any mention about Fred?", filter=[{"range": {"metadata.rating": {"gte": 2}}}]
)
print(docs[0].metadata)
docs = db.similarity_search(
"Any mention about Fred?",
filter=[
{
"geo_distance": {
"distance": "200km",
"metadata.geo_location": {"lat": 40, "lon": -70},
}
}
],
)
print(docs[0].metadata)
db = ElasticsearchStore.from_documents(
docs,
embeddings,
es_url="http://localhost:9200",
index_name="test",
strategy=ElasticsearchStore.ApproxRetrievalStrategy(),
)
docs = db.similarity_search(
query="What did the president say about Ketanji Brown Jackson?", k=10
)
APPROX_SELF_DEPLOYED_INDEX_NAME = "test-approx-self-deployed"
db = ElasticsearchStore(
es_cloud_id="<your cloud id>",
es_user="elastic",
es_password="<your password>",
index_name=APPROX_SELF_DEPLOYED_INDEX_NAME,
query_field="text_field",
vector_query_field="vector_query_field.predicted_value",
strategy=ElasticsearchStore.ApproxRetrievalStrategy(
query_model_id="sentence-transformers__all-minilm-l6-v2"
),
)
db.client.ingest.put_pipeline(
id="test_pipeline",
processors=[
{
"inference": {
"model_id": "sentence-transformers__all-minilm-l6-v2",
"field_map": {"query_field": "text_field"},
"target_field": "vector_query_field",
}
}
],
)
db.client.indices.create(
index=APPROX_SELF_DEPLOYED_INDEX_NAME,
mappings={
"properties": {
"text_field": {"type": "text"},
"vector_query_field": {
"properties": {
"predicted_value": {
"type": "dense_vector",
"dims": 384,
"index": True,
"similarity": "l2_norm",
}
}
},
}
},
settings={"index": {"default_pipeline": "test_pipeline"}},
)
db.from_texts(
["hello world"],
es_cloud_id="<cloud id>",
es_user="elastic",
es_password="<cloud password>",
index_name=APPROX_SELF_DEPLOYED_INDEX_NAME,
query_field="text_field",
vector_query_field="vector_query_field.predicted_value",
strategy=ElasticsearchStore.ApproxRetrievalStrategy(
query_model_id="sentence-transformers__all-minilm-l6-v2"
),
)
db.similarity_search("hello world", k=10)
db = ElasticsearchStore.from_documents(
docs,
es_cloud_id="My_deployment:dXMtY2VudHJhbDEuZ2NwLmNsb3VkLmVzLmlvOjQ0MyQ2OGJhMjhmNDc1M2Y0MWVjYTk2NzI2ZWNkMmE5YzRkNyQ3NWI4ODRjNWQ2OTU0MTYzODFjOTkxNmQ1YzYxMGI1Mw==",
es_user="elastic",
es_password="GgUPiWKwEzgHIYdHdgPk1Lwi",
index_name="test-elser",
    strategy=ElasticsearchStore.SparseVectorRetrievalStrategy(),
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet polars')
import polars as pl
df = pl.read_csv("example_data/mlb_teams_2012.csv")
df.head()
from langchain_community.document_loaders import PolarsDataFrameLoader
loader = PolarsDataFrameLoader(df, page_content_column="Team")
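# Hedged usage: each DataFrame row becomes a Document, with the "Team" column as page_content
# and the remaining columns as metadata; lazy_load() yields them one at a time.
docs = loader.load()
print(docs[0])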
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
tools = load_tools(["google-serper"], llm=llm)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("What is the weather in Pomfret?")
tools = load_tools(["searchapi"], llm=llm)
from langchain_experimental.pal_chain import PALChain
from langchain_openai import OpenAI
llm = OpenAI(temperature=0, max_tokens=512)
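# Sketch (assumed continuation, following the PAL examples in the LangChain docs): build a
# math-oriented PAL chain and run a word problem; the question text is an assumption.
pal_chain = PALChain.from_math_prompt(llm, verbose=True)
question = (
    "Jan has three times the number of pets as Marcia. Marcia has two more pets than "
    "Cindy. If Cindy has four pets, how many total pets do the three have?"
)
pal_chain.run(question)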
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai context-python')
import os
from langchain.callbacks import ContextCallbackHandler
token = os.environ["CONTEXT_API_TOKEN"]
context_callback = ContextCallbackHandler(token)
import os
from langchain.callbacks import ContextCallbackHandler
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_openai import ChatOpenAI
token = os.environ["CONTEXT_API_TOKEN"]
chat = ChatOpenAI(
headers={"user_id": "123"}, temperature=0, callbacks=[ContextCallbackHandler(token)]
)
messages = [
SystemMessage(
content="You are a helpful assistant that translates English to French."
),
    HumanMessage(content="I love programming."),
]
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0, model="gpt-4-turbo-preview")
from langchain import hub
from langchain_core.prompts import PromptTemplate
select_prompt = hub.pull("hwchase17/self-discovery-select")
select_prompt.pretty_print()
adapt_prompt = hub.pull("hwchase17/self-discovery-adapt")
adapt_prompt.pretty_print()
structured_prompt = hub.pull("hwchase17/self-discovery-structure")
structured_prompt.pretty_print()
reasoning_prompt = hub.pull("hwchase17/self-discovery-reasoning")
reasoning_prompt.pretty_print()
reasoning_prompt
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
select_chain = select_prompt | model | StrOutputParser()
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-memorystore-redis')
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
from google.colab import auth
auth.authenticate_user()
import redis
from langchain_google_memorystore_redis import (
DistanceStrategy,
HNSWConfig,
RedisVectorStore,
)
redis_client = redis.from_url("redis://127.0.0.1:6379")
index_config = HNSWConfig(
name="my_vector_index", distance_strategy=DistanceStrategy.COSINE, vector_size=128
)
RedisVectorStore.init_index(client=redis_client, index_config=index_config)
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("./state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
from langchain_community.embeddings.fake import FakeEmbeddings
embeddings = FakeEmbeddings(size=128)
from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import ModernTreasuryLoader
modern_treasury_loader = ModernTreasuryLoader("payment_orders")
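# Sketch (assumed continuation): index the loaded payment orders and query them; newer
# versions of VectorstoreIndexCreator may require passing the LLM and embedding explicitly.
from langchain_openai import OpenAI

index = VectorstoreIndexCreator().from_loaders([modern_treasury_loader])
index.query("What payment orders are there?", llm=OpenAI())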
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-nvidia-ai-endpoints')
import getpass
import os
if not os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"):
nvapi_key = getpass.getpass("Enter your NVIDIA API key: ")
assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key"
os.environ["NVIDIA_API_KEY"] = nvapi_key
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm = ChatNVIDIA(model="mixtral_8x7b")
result = llm.invoke("Write a ballad about LangChain.")
print(result.content)
print(llm.batch(["What's 2*3?", "What's 2*6?"]))
for chunk in llm.stream("How far can a seagull fly in one day?"):
print(chunk.content, end="|")
async for chunk in llm.astream(
"How long does it take for monarch butterflies to migrate?"
):
print(chunk.content, end="|")
ChatNVIDIA.get_available_models()
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_nvidia_ai_endpoints import ChatNVIDIA
prompt = ChatPromptTemplate.from_messages(
[("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")]
)
chain = prompt | ChatNVIDIA(model="llama2_13b") | StrOutputParser()
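# Hedged usage of the chain assembled above; with StrOutputParser the stream yields plain text.
for txt in chain.stream({"input": "What's your name?"}):
    print(txt, end="")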
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_community.chat_models import JinaChat
from langchain_core.messages import HumanMessage, SystemMessage
chat = JinaChat(temperature=0)
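# Sketch (assumed continuation): send a system + human message pair, as in the other
# chat-model examples in this document.
messages = [
    SystemMessage(
        content="You are a helpful assistant that translates English to French."
    ),
    HumanMessage(content="I love programming."),
]
chat(messages)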
from langchain.chains import GraphSparqlQAChain
from langchain_community.graphs import RdfGraph
from langchain_openai import ChatOpenAI
graph = RdfGraph(
source_file="http://www.w3.org/People/Berners-Lee/card",
standard="rdf",
local_copy="test.ttl",
)
graph.load_schema()
graph.get_schema
chain = GraphSparqlQAChain.from_llm(
ChatOpenAI(temperature=0), graph=graph, verbose=True
)
chain.run("What is Tim Berners-Lee's work homepage?")
chain.run(
"Save that the person with the name 'Timothy Berners-Lee' has a work homepage at 'http://www.w3.org/foo/bar/'"
)
query = (
"""PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n"""
"""SELECT ?hp\n"""
"""WHERE {\n"""
""" ?person foaf:name "Timothy Berners-Lee" . \n"""
""" ?person foaf:workplaceHomepage ?hp .\n"""
"""}"""
)
graph.query(query)
chain = GraphSparqlQAChain.from_llm(
    ChatOpenAI(temperature=0), graph=graph, verbose=True
)
meals = [
"Beef Enchiladas with Feta cheese. Mexican-Greek fusion",
"Chicken Flatbreads with red sauce. Italian-Mexican fusion",
"Veggie sweet potato quesadillas with vegan cheese",
"One-Pan Tortelonni bake with peppers and onions",
]
from langchain_openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-instruct")
from langchain.prompts import PromptTemplate
PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}".
Embed the meal into the given text: "{text_to_personalize}".
Prepend a personalized message including the user's name "{user}"
and their preference "{preference}".
Make it sound good.
"""
PROMPT = PromptTemplate(
input_variables=["meal", "text_to_personalize", "user", "preference"],
template=PROMPT_TEMPLATE,
)
import langchain_experimental.rl_chain as rl_chain
chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs \
believe you will love it!",
)
print(response["response"])
for _ in range(5):
try:
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
except Exception as e:
print(e)
print(response["response"])
print()
scoring_criteria_template = (
"Given {preference} rank how good or bad this selection is {meal}"
)
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=rl_chain.AutoSelectionScorer(
llm=llm, scoring_criteria_template_str=scoring_criteria_template
),
)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
print(response["response"])
selection_metadata = response["selection_metadata"]
print(
f"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}"
)
class CustomSelectionScorer(rl_chain.SelectionScorer):
def score_response(
self, inputs, llm_response: str, event: rl_chain.PickBestEvent
) -> float:
print(event.based_on)
print(event.to_select_from)
selected_meal = event.to_select_from["meal"][event.selected.index]
print(f"selected meal: {selected_meal}")
if "Tom" in event.based_on["user"]:
if "Vegetarian" in event.based_on["preference"]:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 0.0
else:
return 1.0
else:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 1.0
else:
return 0.0
else:
raise NotImplementedError("I don't know how to score this user")
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=CustomSelectionScorer(),
)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
class CustomSelectionScorer(rl_chain.SelectionScorer):
def score_preference(self, preference, selected_meal):
if "Vegetarian" in preference:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 0.0
else:
return 1.0
else:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 1.0
else:
return 0.0
def score_response(
self, inputs, llm_response: str, event: rl_chain.PickBestEvent
) -> float:
selected_meal = event.to_select_from["meal"][event.selected.index]
if "Tom" in event.based_on["user"]:
return self.score_preference(event.based_on["preference"], selected_meal)
elif "Anna" in event.based_on["user"]:
return self.score_preference(event.based_on["preference"], selected_meal)
else:
raise NotImplementedError("I don't know how to score this user")
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=CustomSelectionScorer(),
metrics_step=5,
metrics_window_size=5, # rolling window average
)
random_chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=CustomSelectionScorer(),
metrics_step=5,
metrics_window_size=5, # rolling window average
policy=rl_chain.PickBestRandomPolicy, # set the random policy instead of default
)
for _ in range(20):
try:
chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
random_chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Anna"),
preference=rl_chain.BasedOn(["Loves meat", "especially beef"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
random_chain.run(
meal=rl_chain.ToSelectFrom(meals),
            user=rl_chain.BasedOn("Anna"),
            preference=rl_chain.BasedOn(["Loves meat", "especially beef"]),
            text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
        )
    except Exception as e:
        print(e)
import os
from langchain.chains import ConversationalRetrievalChain
from langchain_community.vectorstores import Vectara
from langchain_openai import OpenAI
from langchain_community.document_loaders import TextLoader
loader = TextLoader("state_of_the_union.txt")
documents = loader.load()
vectara = Vectara.from_documents(documents, embedding=None)
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
openai_api_key = os.environ["OPENAI_API_KEY"]
llm = OpenAI(openai_api_key=openai_api_key, temperature=0)
retriever = vectara.as_retriever()
d = retriever.get_relevant_documents(
"What did the president say about Ketanji Brown Jackson", k=2
)
print(d)
bot = ConversationalRetrievalChain.from_llm(
llm, retriever, memory=memory, verbose=False
)
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query})
result["answer"]
query = "Did he mention who she suceeded"
result = bot.invoke({"question": query})
result["answer"]
bot = ConversationalRetrievalChain.from_llm(
OpenAI(temperature=0), vectara.as_retriever()
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["answer"]
chat_history = [(query, result["answer"])]
query = "Did he mention who she suceeded"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["answer"]
bot = ConversationalRetrievalChain.from_llm(
llm, vectara.as_retriever(), return_source_documents=True
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})
result["source_documents"][0]
from langchain.chains import LLMChain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from langchain.chains.question_answering import load_qa_chain
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
doc_chain = load_qa_chain(llm, chain_type="map_reduce")
chain = ConversationalRetrievalChain(
retriever=vectara.as_retriever(),
question_generator=question_generator,
combine_docs_chain=doc_chain,
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = chain({"question": query, "chat_history": chat_history})
result["answer"]
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
doc_chain = load_qa_with_sources_chain(llm, chain_type="map_reduce")
chain = ConversationalRetrievalChain(
retriever=vectara.as_retriever(),
question_generator=question_generator,
combine_docs_chain=doc_chain,
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = chain({"question": query, "chat_history": chat_history})
result["answer"]
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains.conversational_retrieval.prompts import (
CONDENSE_QUESTION_PROMPT,
QA_PROMPT,
)
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
llm = OpenAI(temperature=0, openai_api_key=openai_api_key)
streaming_llm = OpenAI(
streaming=True,
callbacks=[StreamingStdOutCallbackHandler()],
temperature=0,
openai_api_key=openai_api_key,
)
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
doc_chain = load_qa_chain(streaming_llm, chain_type="stuff", prompt=QA_PROMPT)
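# Sketch (assumed continuation): assemble the streaming chain; answer tokens are printed by
# the stdout callback as they are generated.
bot = ConversationalRetrievalChain(
    retriever=vectara.as_retriever(),
    combine_docs_chain=doc_chain,
    question_generator=question_generator,
)
chat_history = []
query = "What did the president say about Ketanji Brown Jackson"
result = bot.invoke({"question": query, "chat_history": chat_history})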
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_community.chat_models import ChatAnthropic
from langchain_openai import ChatOpenAI
from unittest.mock import patch
import httpx
from openai import RateLimitError
request = httpx.Request("GET", "/")
response = httpx.Response(200, request=request)
error = RateLimitError("rate limit", response=response, body="")
openai_llm = ChatOpenAI(max_retries=0)
anthropic_llm = ChatAnthropic()
llm = openai_llm.with_fallbacks([anthropic_llm])
with patch("openai.resources.chat.completions.Completions.create", side_effect=error):
try:
print(openai_llm.invoke("Why did the chicken cross the road?"))
except RateLimitError:
print("Hit error")
with patch("openai.resources.chat.completions.Completions.create", side_effect=error):
try:
print(llm.invoke("Why did the chicken cross the road?"))
except RateLimitError:
print("Hit error")
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a nice assistant who always includes a compliment in your response",
),
("human", "Why did the {animal} cross the road"),
]
)
chain = prompt | llm
with patch("openai.resources.chat.completions.Completions.create", side_effect=error):
try:
print(chain.invoke({"animal": "kangaroo"}))
except RateLimitError:
print("Hit error")
from langchain_core.output_parsers import StrOutputParser
chat_prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You're a nice assistant who always includes a compliment in your response",
),
("human", "Why did the {animal} cross the road"),
]
)
chat_model = ChatOpenAI(model_name="gpt-fake")
bad_chain = chat_prompt | chat_model | StrOutputParser()
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
prompt_template = """Instructions: You should always include a compliment in your response.
Question: Why did the {animal} cross the road?"""
prompt = PromptTemplate.from_template(prompt_template)
llm = OpenAI()
good_chain = prompt | llm
chain = bad_chain.with_fallbacks([good_chain])
chain.invoke({"animal": "turtle"})
short_llm = ChatOpenAI()
long_llm = ChatOpenAI(model="gpt-3.5-turbo-16k")
llm = short_llm.with_fallbacks([long_llm])
inputs = "What is the next number: " + ", ".join(["one", "two"] * 3000)
try:
print(short_llm.invoke(inputs))
except Exception as e:
print(e)
try:
print(llm.invoke(inputs))
except Exception as e:
print(e)
from langchain.output_parsers import DatetimeOutputParser
prompt = ChatPromptTemplate.from_template(
    "what time was {event} (in %Y-%m-%dT%H:%M:%S.%fZ format - only return this value)"
)
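# Hedged sketch: pair the prompt with a chat model and the datetime parser; wrapping the call
# in try/except matches the error-handling style used earlier in this snippet.
output_parser = DatetimeOutputParser()
chain = prompt | ChatOpenAI() | output_parser
try:
    print(chain.invoke({"event": "the superbowl in 1994"}))
except Exception as e:
    print(f"error: {e}")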
from langchain_community.llms import Ollama
llm = Ollama(model="llama2")
llm("The first man on the moon was ...")
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
llm = Ollama(
model="llama2", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])
)
llm("The first man on the moon was ...")
from langchain_community.llms import Ollama
llm = Ollama(model="llama2:13b")
llm("The first man on the moon was ... think step by step")
get_ipython().run_line_magic('env', 'CMAKE_ARGS="-DLLAMA_METAL=on"')
get_ipython().run_line_magic('env', 'FORCE_CMAKE=1')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet llama-cpp-python --no-cache-dir')
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_community.llms import LlamaCpp
llm = LlamaCpp(
model_path="/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin",
n_gpu_layers=1,
n_batch=512,
n_ctx=2048,
f16_kv=True,
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-robocorp')
from langchain.agents import AgentExecutor, OpenAIFunctionsAgent
from langchain_core.messages import SystemMessage
from langchain_openai import ChatOpenAI
from langchain_robocorp import ActionServerToolkit
llm = ChatOpenAI(model="gpt-4", temperature=0)
toolkit = ActionServerToolkit(url="http://localhost:8080", report_trace=True)
tools = toolkit.get_tools()
system_message = SystemMessage(content="You are a helpful assistant")
prompt = OpenAIFunctionsAgent.create_prompt(system_message)
agent = OpenAIFunctionsAgent(llm=llm, prompt=prompt, tools=tools)
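# Sketch (assumed continuation): wrap the agent in an executor and call one of the action
# server's exposed actions; the example input is an assumption.
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
executor.invoke({"input": "What is the current weather in Helsinki?"})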
from langchain.chains import RetrievalQA
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
llm = OpenAI(temperature=0)
from pathlib import Path
relevant_parts = []
for p in Path(".").absolute().parts:
relevant_parts.append(p)
if relevant_parts[-3:] == ["langchain", "docs", "modules"]:
break
doc_path = str(Path(*relevant_parts) / "state_of_the_union.txt")
from langchain_community.document_loaders import TextLoader
loader = TextLoader(doc_path)
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embeddings, collection_name="state-of-union")
state_of_union = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=docsearch.as_retriever()
)
from langchain_community.document_loaders import WebBaseLoader
loader = WebBaseLoader("https://beta.ruff.rs/docs/faq/")
docs = loader.load()
ruff_texts = text_splitter.split_documents(docs)
ruff_db = Chroma.from_documents(ruff_texts, embeddings, collection_name="ruff")
ruff = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=ruff_db.as_retriever()
)
from langchain.agents import AgentType, Tool, initialize_agent
from langchain_openai import OpenAI
tools = [
Tool(
name="State of Union QA System",
func=state_of_union.run,
description="useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.",
),
Tool(
name="Ruff QA System",
func=ruff.run,
description="useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.",
),
]
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run(
"What did biden say about ketanji brown jackson in the state of the union address?"
)
agent.run("Why use ruff over flake8?")
tools = [
Tool(
name="State of Union QA System",
func=state_of_union.run,
description="useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.",
return_direct=True,
),
    Tool(
        name="Ruff QA System",
        func=ruff.run,
        description="useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.",
        return_direct=True,
    ),
]
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.fake import FakeEmbeddings
from langchain_community.vectorstores import Vectara
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("state_of_the_union.txt")
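# Sketch (assumed continuation): split the document and index it in Vectara; Vectara embeds
# text server-side, so FakeEmbeddings is only a placeholder for the embedding argument.
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
vectara = Vectara.from_documents(docs, embedding=FakeEmbeddings(size=768))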
import json
from pprint import pprint
from langchain.globals import set_debug
from langchain_community.llms import NIBittensorLLM
set_debug(True)
llm_sys = NIBittensorLLM(
system_prompt="Your task is to determine response based on user prompt.Explain me like I am technical lead of a project"
)
sys_resp = llm_sys(
"What is bittensor and What are the potential benefits of decentralized AI?"
)
print(f"Response provided by LLM with system prompt set is : {sys_resp}")
""" {
"choices": [
{"index": Bittensor's Metagraph index number,
"uid": Unique Identifier of a miner,
"responder_hotkey": Hotkey of a miner,
"message":{"role":"assistant","content": Contains actual response},
"response_ms": Time in millisecond required to fetch response from a miner}
]
} """
multi_response_llm = NIBittensorLLM(top_responses=10)
multi_resp = multi_response_llm("What is Neural Network Feeding Mechanism?")
json_multi_resp = json.loads(multi_resp)
pprint(json_multi_resp)
from langchain.chains import LLMChain
from langchain.globals import set_debug
from langchain.prompts import PromptTemplate
from langchain_community.llms import NIBittensorLLM
set_debug(True)
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm = NIBittensorLLM(
system_prompt="Your task is to determine response based on user prompt."
)
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What is bittensor?"
llm_chain.run(question)
from langchain.tools import Tool
from langchain_community.utilities import GoogleSearchAPIWrapper
search = GoogleSearchAPIWrapper()
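# Sketch (assumed continuation): expose the wrapper as a Tool, which is why Tool is imported above.
tool = Tool(
    name="google_search",
    description="Search Google for recent results.",
    func=search.run,
)
tool.run("Obama's first name?")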
get_ipython().run_line_magic('pip', 'install --upgrade --quiet docx2txt')
from langchain_community.document_loaders import Docx2txtLoader
loader = Docx2txtLoader("example_data/fake.docx")
from langchain.output_parsers import (
OutputFixingParser,
PydanticOutputParser,
)
from langchain.prompts import (
PromptTemplate,
)
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI, OpenAI
template = """Based on the user question, provide an Action and Action Input for what step should be taken.
{format_instructions}
Question: {query}
Response:"""
class Action(BaseModel):
action: str = Field(description="action to take")
action_input: str = Field(description="input to the action")
parser = PydanticOutputParser(pydantic_object=Action)
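# Sketch (assumed continuation): since OutputFixingParser is imported above, wrap the Pydantic
# parser so malformed model output can be repaired by a second LLM call.
prompt = PromptTemplate(
    template=template,
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
fixing_parser = OutputFixingParser.from_llm(parser=parser, llm=ChatOpenAI())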
get_ipython().run_line_magic('pip', 'install -qU langchain-community langchain-openai')
from langchain_community.tools import MoveFileTool
from langchain_core.messages import HumanMessage
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-3.5-turbo")
tools = [MoveFileTool()]
functions = [convert_to_openai_function(t) for t in tools]
functions[0]
message = model.invoke(
[HumanMessage(content="move file foo to bar")], functions=functions
)
message
message.additional_kwargs["function_call"]
model_with_functions = model.bind_functions(tools)
model_with_functions.invoke([HumanMessage(content="move file foo to bar")])
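# Sketch: force the model to call a specific function by binding function_call as well,
# mirroring the .bind() usage with the "solver" function earlier in this document.
model_with_forced_function = model.bind(
    functions=functions, function_call={"name": "move_file"}
)
model_with_forced_function.invoke([HumanMessage(content="move file foo to bar")])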