| prompt (string, 51–10k chars) | completion (string, 8–362 chars) | api (string, 18–90 chars) |
| --- | --- | --- |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sentence-transformers > /dev/null')
from langchain.chains import LLMChain, StuffDocumentsChain
from langchain.prompts import PromptTemplate
from langchain_community.document_transformers import (
LongContextReorder,
)
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAI
embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
texts = [
"Basquetball is a great sport.",
"Fly me to the moon is one of my favourite songs.",
"The Celtics are my favourite team.",
"This is a document about the Boston Celtics",
"I simply love going to the movies",
"The Boston Celtics won the game by 20 points",
"This is just a random text.",
"Elden Ring is one of the best games in the last 15 years.",
"L. Kornet is one of the best Celtics players.",
"Larry Bird was an iconic NBA player.",
]
retriever = Chroma.from_texts(texts, embedding=embeddings)  # api: langchain_community.vectorstores.Chroma.from_texts
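# Hedged continuation sketch (not part of the original row): LongContextReorder,
# imported above, reorders retrieved documents so the most relevant ones sit at
# the beginning and end of the list before they are stuffed into a prompt.
query = "What can you tell me about the Celtics?"
docs = retriever.similarity_search(query, k=10)
reordering = LongContextReorder()
reordered_docs = reordering.transform_documents(docs)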
get_ipython().run_line_magic('pip', 'install -qU chromadb langchain langchain-community langchain-openai')
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = TextLoader("../../state_of_the_union.txt")
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
for i, doc in enumerate(texts):
doc.metadata["page_chunk"] = i
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(texts, embeddings, collection_name="state-of-union")
retriever = vectorstore.as_retriever()
from langchain.tools.retriever import create_retriever_tool
retriever_tool = create_retriever_tool(
retriever,
"state-of-union-retriever",
"Query a retriever to get information about state of the union address",
)
from typing import List
from langchain_core.pydantic_v1 import BaseModel, Field
class Response(BaseModel):
"""Final response to the question being asked"""
answer: str = Field(description="The final answer to respond to the user")
sources: List[int] = Field(
description="List of page chunks that contain answer to the question. Only include a page chunk if it contains relevant information"
)
import json
from langchain_core.agents import AgentActionMessageLog, AgentFinish
def parse(output):
if "function_call" not in output.additional_kwargs:
        return AgentFinish(return_values={"output": output.content}, log=output.content)  # api: langchain_core.agents.AgentFinish
get_ipython().run_line_magic('pip', 'install -qU langchain-text-splitters')
from langchain_text_splitters import MarkdownHeaderTextSplitter
markdown_document = "# Foo\n\n ## Bar\n\nHi this is Jim\n\nHi this is Joe\n\n ### Boo \n\n Hi this is Lance \n\n ## Baz\n\n Hi this is Molly"
headers_to_split_on = [
("#", "Header 1"),
("##", "Header 2"),
("###", "Header 3"),
]
markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on)  # api: langchain_text_splitters.MarkdownHeaderTextSplitter
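# Hedged usage sketch: splitting the sample document on the configured headers.
md_header_splits = markdown_splitter.split_text(markdown_document)
print(md_header_splits)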
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pyspark')
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
df = spark.read.csv("example_data/mlb_teams_2012.csv", header=True)
from langchain_community.document_loaders import PySparkDataFrameLoader
loader = PySparkDataFrameLoader(spark, df, page_content_column="Team")  # api: langchain_community.document_loaders.PySparkDataFrameLoader
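# Hedged usage sketch: loading one Document per DataFrame row, with the "Team"
# column used as the page content.
documents = loader.load()
print(documents[:3])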
import pprint
from typing import Any, Dict
import pandas as pd
from langchain.output_parsers import PandasDataFrameOutputParser
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0)
def format_parser_output(parser_output: Dict[str, Any]) -> None:
for key in parser_output.keys():
parser_output[key] = parser_output[key].to_dict()
return pprint.PrettyPrinter(width=4, compact=True).pprint(parser_output)
df = pd.DataFrame(
{
"num_legs": [2, 4, 8, 0],
"num_wings": [2, 0, 0, 0],
"num_specimen_seen": [10, 2, 1, 8],
}
)
parser = PandasDataFrameOutputParser(dataframe=df)  # api: langchain.output_parsers.PandasDataFrameOutputParser
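# Hedged continuation sketch: the parser's format instructions are injected into
# a prompt and the parsed result is pretty-printed with the helper defined above.
# The column query string is only an example.
df_prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
chain = df_prompt | model | parser
parser_output = chain.invoke({"query": "Retrieve the num_wings column."})
format_parser_output(parser_output)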
get_ipython().run_line_magic('pip', 'install --upgrade --quiet lm-format-enforcer > /dev/null')
import logging
from langchain_experimental.pydantic_v1 import BaseModel
logging.basicConfig(level=logging.ERROR)
class PlayerInformation(BaseModel):
first_name: str
last_name: str
num_seasons_in_nba: int
year_of_birth: int
import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
model_id = "meta-llama/Llama-2-7b-chat-hf"
device = "cuda"
if torch.cuda.is_available():
config = AutoConfig.from_pretrained(model_id)
config.pretraining_tp = 1
model = AutoModelForCausalLM.from_pretrained(
model_id,
config=config,
torch_dtype=torch.float16,
load_in_8bit=True,
device_map="auto",
)
else:
raise Exception("GPU not available")
tokenizer = AutoTokenizer.from_pretrained(model_id)
if tokenizer.pad_token_id is None:
tokenizer.pad_token_id = tokenizer.eos_token_id
DEFAULT_SYSTEM_PROMPT = """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\
"""
prompt = """Please give me information about {player_name}. You must respond using JSON format, according to the following schema:
{arg_schema}
"""
def make_instruction_prompt(message):
return f"[INST] <<SYS>>\n{DEFAULT_SYSTEM_PROMPT}\n<</SYS>> {message} [/INST]"
def get_prompt(player_name):
return make_instruction_prompt(
prompt.format(
player_name=player_name, arg_schema=PlayerInformation.schema_json()
)
)
from langchain_community.llms import HuggingFacePipeline
from transformers import pipeline
hf_model = pipeline(
"text-generation", model=model, tokenizer=tokenizer, max_new_tokens=200
)
original_model = HuggingFacePipeline(pipeline=hf_model)
generated = original_model.predict(get_prompt("Michael Jordan"))
print(generated)
from langchain_experimental.llms import LMFormatEnforcer
lm_format_enforcer = LMFormatEnforcer(
json_schema=PlayerInformation.schema(), pipeline=hf_model
)
results = lm_format_enforcer.predict(get_prompt("Michael Jordan"))
print(results)
prompts = [
get_prompt(name) for name in ["Michael Jordan", "Kareem Abdul Jabbar", "Tim Duncan"]
]
results = lm_format_enforcer.generate(prompts)
for generation in results.generations:
print(generation[0].text)
question_prompt = "When was Michael Jordan Born? Please answer in mm/dd/yyyy format."
date_regex = r"(0?[1-9]|1[0-2])\/(0?[1-9]|1\d|2\d|3[01])\/(19|20)\d{2}"
answer_regex = " In mm/dd/yyyy format, Michael Jordan was born in " + date_regex
lm_format_enforcer = LMFormatEnforcer(regex=answer_regex, pipeline=hf_model)  # api: langchain_experimental.llms.LMFormatEnforcer
get_ipython().system('pip install --quiet langchain_experimental langchain_openai')
with open("../../state_of_the_union.txt") as f:
state_of_the_union = f.read()
from langchain_experimental.text_splitter import SemanticChunker
from langchain_openai.embeddings import OpenAIEmbeddings
text_splitter = SemanticChunker(OpenAIEmbeddings())
docs = text_splitter.create_documents([state_of_the_union])
print(docs[0].page_content)
text_splitter = SemanticChunker(
OpenAIEmbeddings(), breakpoint_threshold_type="percentile"
)
docs = text_splitter.create_documents([state_of_the_union])
print(docs[0].page_content)
print(len(docs))
text_splitter = SemanticChunker(
    OpenAIEmbeddings()  # api: langchain_openai.embeddings.OpenAIEmbeddings
)
from langchain_community.chat_models import ChatDatabricks
from langchain_core.messages import HumanMessage
from mlflow.deployments import get_deploy_client
client = get_deploy_client("databricks")
secret = "secrets/<scope>/openai-api-key" # replace `<scope>` with your scope
name = "my-chat" # rename this if my-chat already exists
client.create_endpoint(
name=name,
config={
"served_entities": [
{
"name": "my-chat",
"external_model": {
"name": "gpt-4",
"provider": "openai",
"task": "llm/v1/chat",
"openai_config": {
"openai_api_key": "{{" + secret + "}}",
},
},
}
],
},
)
chat = ChatDatabricks(
target_uri="databricks",
endpoint=name,
temperature=0.1,
)
chat([HumanMessage(content="hello")])  # api: langchain_core.messages.HumanMessage
get_ipython().system('pip install -U oci')
from langchain_community.llms import OCIGenAI
llm = OCIGenAI(
model_id="MY_MODEL",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="MY_OCID",
)
response = llm.invoke("Tell me one fact about earth", temperature=0.7)
print(response)
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate
llm = OCIGenAI(
model_id="MY_MODEL",
service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com",
compartment_id="MY_OCID",
auth_type="SECURITY_TOKEN",
auth_profile="MY_PROFILE", # replace with your profile name
model_kwargs={"temperature": 0.7, "top_p": 0.75, "max_tokens": 200},
)
prompt = PromptTemplate(input_variables=["query"], template="{query}")  # api: langchain_core.prompts.PromptTemplate
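# Hedged continuation sketch: wiring the prompt and the session-token
# authenticated OCI GenAI LLM into the LLMChain imported above.
llm_chain = LLMChain(llm=llm, prompt=prompt)
print(llm_chain.run("Tell me one fact about the Earth"))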
get_ipython().run_line_magic('pip', 'install --upgrade --quiet clearml')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pandas')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet textstat')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet spacy')
get_ipython().system('python -m spacy download en_core_web_sm')
import os
os.environ["CLEARML_API_ACCESS_KEY"] = ""
os.environ["CLEARML_API_SECRET_KEY"] = ""
os.environ["OPENAI_API_KEY"] = ""
os.environ["SERPAPI_API_KEY"] = ""
from langchain.callbacks import ClearMLCallbackHandler
from langchain.callbacks import StdOutCallbackHandler
from langchain_openai import OpenAI
clearml_callback = ClearMLCallbackHandler(
task_type="inference",
project_name="langchain_callback_demo",
task_name="llm",
tags=["test"],
visualize=True,
complexity_metrics=True,
stream_logs=True,
)
callbacks = [StdOutCallbackHandler(), clearml_callback]
llm = OpenAI(temperature=0, callbacks=callbacks)
llm_result = llm.generate(["Tell me a joke", "Tell me a poem"] * 3)
clearml_callback.flush_tracker(langchain_asset=llm, name="simple_sequential")
from langchain.agents import AgentType, initialize_agent, load_tools
tools = load_tools(["serpapi", "llm-math"], llm=llm, callbacks=callbacks)  # api: langchain.agents.load_tools
import asyncio
from langchain.callbacks import get_openai_callback
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
with get_openai_callback() as cb:
llm("What is the square root of 4?")
total_tokens = cb.total_tokens
assert total_tokens > 0
with get_openai_callback() as cb:
llm("What is the square root of 4?")
llm("What is the square root of 4?")
assert cb.total_tokens == total_tokens * 2
with get_openai_callback() as cb:  # api: langchain.callbacks.get_openai_callback
    llm("What is the square root of 4?")
import asyncio
import os
import nest_asyncio
import pandas as pd
from langchain.docstore.document import Document
from langchain_community.agent_toolkits.pandas.base import create_pandas_dataframe_agent
from langchain_experimental.autonomous_agents import AutoGPT
from langchain_openai import ChatOpenAI
nest_asyncio.apply()
llm = ChatOpenAI(model_name="gpt-4", temperature=1.0)
import os
from contextlib import contextmanager
from typing import Optional
from langchain.agents import tool
from langchain_community.tools.file_management.read import ReadFileTool
from langchain_community.tools.file_management.write import WriteFileTool
ROOT_DIR = "./data/"
@contextmanager
def pushd(new_dir):
"""Context manager for changing the current working directory."""
prev_dir = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(prev_dir)
@tool
def process_csv(
csv_file_path: str, instructions: str, output_path: Optional[str] = None
) -> str:
"""Process a CSV by with pandas in a limited REPL.\
Only use this after writing data to disk as a csv file.\
Any figures must be saved to disk to be viewed by the human.\
Instructions should be written in natural language, not code. Assume the dataframe is already loaded."""
with pushd(ROOT_DIR):
try:
df = pd.read_csv(csv_file_path)
except Exception as e:
return f"Error: {e}"
agent = create_pandas_dataframe_agent(llm, df, max_iterations=30, verbose=True)
if output_path is not None:
instructions += f" Save output to disk at {output_path}"
try:
result = agent.run(instructions)
return result
except Exception as e:
return f"Error: {e}"
async def async_load_playwright(url: str) -> str:
"""Load the specified URLs using Playwright and parse using BeautifulSoup."""
from bs4 import BeautifulSoup
from playwright.async_api import async_playwright
results = ""
async with async_playwright() as p:
browser = await p.chromium.launch(headless=True)
try:
page = await browser.new_page()
await page.goto(url)
page_source = await page.content()
soup = BeautifulSoup(page_source, "html.parser")
for script in soup(["script", "style"]):
script.extract()
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
results = "\n".join(chunk for chunk in chunks if chunk)
except Exception as e:
results = f"Error: {e}"
await browser.close()
return results
def run_async(coro):
event_loop = asyncio.get_event_loop()
return event_loop.run_until_complete(coro)
@tool
def browse_web_page(url: str) -> str:
"""Verbose way to scrape a whole webpage. Likely to cause issues parsing."""
return run_async(async_load_playwright(url))
from langchain.chains.qa_with_sources.loading import (
BaseCombineDocumentsChain,
load_qa_with_sources_chain,
)
from langchain.tools import BaseTool, DuckDuckGoSearchRun
from langchain_text_splitters import RecursiveCharacterTextSplitter
from pydantic import Field
def _get_text_splitter():
return RecursiveCharacterTextSplitter(
chunk_size=500,
chunk_overlap=20,
length_function=len,
)
class WebpageQATool(BaseTool):
name = "query_webpage"
description = (
"Browse a webpage and retrieve the information relevant to the question."
)
text_splitter: RecursiveCharacterTextSplitter = Field(
default_factory=_get_text_splitter
)
qa_chain: BaseCombineDocumentsChain
def _run(self, url: str, question: str) -> str:
"""Useful for browsing websites and scraping the text information."""
result = browse_web_page.run(url)
docs = [Document(page_content=result, metadata={"source": url})]
web_docs = self.text_splitter.split_documents(docs)
results = []
for i in range(0, len(web_docs), 4):
input_docs = web_docs[i : i + 4]
window_result = self.qa_chain(
{"input_documents": input_docs, "question": question},
return_only_outputs=True,
)
results.append(f"Response from window {i} - {window_result}")
results_docs = [
Document(page_content="\n".join(results), metadata={"source": url})
]
return self.qa_chain(
{"input_documents": results_docs, "question": question},
return_only_outputs=True,
)
async def _arun(self, url: str, question: str) -> str:
raise NotImplementedError
query_website_tool = WebpageQATool(qa_chain=load_qa_with_sources_chain(llm))
import faiss
from langchain.docstore import InMemoryDocstore
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings
embeddings_model = OpenAIEmbeddings()
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
web_search = DuckDuckGoSearchRun()  # api: langchain.tools.DuckDuckGoSearchRun
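# Hedged continuation sketch: gathering the tools defined above for the AutoGPT
# agent; the exact tool list is an assumption for this truncated setup.
tools = [
    web_search,
    WriteFileTool(root_dir=ROOT_DIR),
    ReadFileTool(root_dir=ROOT_DIR),
    process_csv,
    query_website_tool,
]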
get_ipython().run_line_magic('pip', 'install --upgrade --quiet atlassian-python-api')
import os
from langchain.agents import AgentType, initialize_agent
from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit
from langchain_community.utilities.jira import JiraAPIWrapper
from langchain_openai import OpenAI
os.environ["JIRA_API_TOKEN"] = "abc"
os.environ["JIRA_USERNAME"] = "123"
os.environ["JIRA_INSTANCE_URL"] = "https://jira.atlassian.com"
os.environ["OPENAI_API_KEY"] = "xyz"
llm = OpenAI(temperature=0)
jira = JiraAPIWrapper()
toolkit = JiraToolkit.from_jira_api_wrapper(jira)  # api: langchain_community.agent_toolkits.jira.toolkit.JiraToolkit.from_jira_api_wrapper
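# Hedged continuation sketch: exposing the Jira toolkit to a zero-shot ReAct
# agent; the example instruction is illustrative only.
agent = initialize_agent(
    toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("Make a new issue in project PW to remind me to make more fried rice")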
get_ipython().system('pip3 install clickhouse-sqlalchemy InstructorEmbedding sentence_transformers openai langchain-experimental')
import getpass
from os import environ
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.utilities import SQLDatabase
from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain
from langchain_openai import OpenAI
from sqlalchemy import MetaData, create_engine
MYSCALE_HOST = "msc-4a9e710a.us-east-1.aws.staging.myscale.cloud"
MYSCALE_PORT = 443
MYSCALE_USER = "chatdata"
MYSCALE_PASSWORD = "myscale_rocks"
OPENAI_API_KEY = getpass.getpass("OpenAI API Key:")
engine = create_engine(
f"clickhouse://{MYSCALE_USER}:{MYSCALE_PASSWORD}@{MYSCALE_HOST}:{MYSCALE_PORT}/default?protocol=https"
)
metadata = MetaData(bind=engine)
environ["OPENAI_API_KEY"] = OPENAI_API_KEY
from langchain_community.embeddings import HuggingFaceInstructEmbeddings
from langchain_experimental.sql.vector_sql import VectorSQLOutputParser
output_parser = VectorSQLOutputParser.from_embeddings(
model=HuggingFaceInstructEmbeddings(
model_name="hkunlp/instructor-xl", model_kwargs={"device": "cpu"}
)
)
from langchain.callbacks import StdOutCallbackHandler
from langchain_community.utilities.sql_database import SQLDatabase
from langchain_experimental.sql.prompt import MYSCALE_PROMPT
from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain
from langchain_openai import OpenAI
chain = VectorSQLDatabaseChain(
llm_chain=LLMChain(
llm=OpenAI(openai_api_key=OPENAI_API_KEY, temperature=0),
prompt=MYSCALE_PROMPT,
),
top_k=10,
return_direct=True,
sql_cmd_parser=output_parser,
database=SQLDatabase(engine, None, metadata),
)
import pandas as pd
pd.DataFrame(
chain.run(
"Please give me 10 papers to ask what is PageRank?",
callbacks=[StdOutCallbackHandler()],
)
)
from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain
from langchain_experimental.retrievers.vector_sql_database import (
VectorSQLDatabaseChainRetriever,
)
from langchain_experimental.sql.prompt import MYSCALE_PROMPT
from langchain_experimental.sql.vector_sql import (
VectorSQLDatabaseChain,
VectorSQLRetrieveAllOutputParser,
)
from langchain_openai import ChatOpenAI
output_parser_retrieve_all = VectorSQLRetrieveAllOutputParser.from_embeddings(
output_parser.model
)
chain = VectorSQLDatabaseChain.from_llm(
llm=OpenAI(openai_api_key=OPENAI_API_KEY, temperature=0),
prompt=MYSCALE_PROMPT,
top_k=10,
return_direct=True,
db=SQLDatabase(engine, None, metadata),
sql_cmd_parser=output_parser_retrieve_all,
native_format=True,
)
retriever = VectorSQLDatabaseChainRetriever(
sql_db_chain=chain, page_content_key="abstract"
)
document_with_metadata_prompt = PromptTemplate(
input_variables=["page_content", "id", "title", "authors", "pubdate", "categories"],
template="Content:\n\tTitle: {title}\n\tAbstract: {page_content}\n\tAuthors: {authors}\n\tDate of Publication: {pubdate}\n\tCategories: {categories}\nSOURCE: {id}",
)
chain = RetrievalQAWithSourcesChain.from_chain_type(
ChatOpenAI(
model_name="gpt-3.5-turbo-16k", openai_api_key=OPENAI_API_KEY, temperature=0.6
),
retriever=retriever,
chain_type="stuff",
chain_type_kwargs={
"document_prompt": document_with_metadata_prompt,
},
return_source_documents=True,
)
ans = chain(
"Please give me 10 papers to ask what is PageRank?",
    callbacks=[StdOutCallbackHandler()],  # api: langchain.callbacks.StdOutCallbackHandler
)
from langchain_community.llms import Baseten
mistral = Baseten(model="MODEL_ID", deployment="production")
mistral("What is the Mistral wind?")
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
template = """Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
{history}
Human: {human_input}
Assistant:"""
prompt = PromptTemplate(input_variables=["history", "human_input"], template=template)
chatgpt_chain = LLMChain(
llm=mistral,
llm_kwargs={"max_length": 4096},
prompt=prompt,
verbose=True,
    memory=ConversationBufferWindowMemory(k=2),  # api: langchain.memory.ConversationBufferWindowMemory
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet arxiv')
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent, load_tools
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0.0)
tools = load_tools(
["arxiv"],
)
prompt = hub.pull("hwchase17/react")  # api: langchain.hub.pull
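# Hedged continuation sketch: assembling the ReAct agent from the pulled prompt
# and running it through an AgentExecutor, using the imports above.
agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke({"input": "What's the paper 1605.08386 about?"})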
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langsmith langchainhub --quiet')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai tiktoken pandas duckduckgo-search --quiet')
import os
from uuid import uuid4
unique_id = uuid4().hex[0:8]
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = f"Tracing Walkthrough - {unique_id}"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["LANGCHAIN_API_KEY"] = "<YOUR-API-KEY>" # Update to your API key
os.environ["OPENAI_API_KEY"] = "<YOUR-OPENAI-API-KEY>"
from langsmith import Client
client = Client()
from langchain import hub
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad.openai_tools import (
format_to_openai_tool_messages,
)
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
from langchain_community.tools import DuckDuckGoSearchResults
from langchain_openai import ChatOpenAI
prompt = hub.pull("wfh/langsmith-agent-prompt:5d466cbc")
llm = ChatOpenAI(
model="gpt-3.5-turbo-16k",
temperature=0,
)
tools = [
DuckDuckGoSearchResults(
name="duck_duck_go"
), # General internet search using DuckDuckGo
]
llm_with_tools = llm.bind_tools(tools)
runnable_agent = (
{
"input": lambda x: x["input"],
"agent_scratchpad": lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
),
}
| prompt
| llm_with_tools
| OpenAIToolsAgentOutputParser()
)
agent_executor = AgentExecutor(
agent=runnable_agent, tools=tools, handle_parsing_errors=True
)
inputs = [
"What is LangChain?",
"What's LangSmith?",
"When was Llama-v2 released?",
"What is the langsmith cookbook?",
"When did langchain first announce the hub?",
]
results = agent_executor.batch([{"input": x} for x in inputs], return_exceptions=True)
results[:2]
outputs = [
"LangChain is an open-source framework for building applications using large language models. It is also the name of the company building LangSmith.",
"LangSmith is a unified platform for debugging, testing, and monitoring language model applications and agents powered by LangChain",
"July 18, 2023",
"The langsmith cookbook is a github repository containing detailed examples of how to use LangSmith to debug, evaluate, and monitor large language model-powered applications.",
"September 5, 2023",
]
dataset_name = f"agent-qa-{unique_id}"
dataset = client.create_dataset(
dataset_name,
description="An example dataset of questions over the LangSmith documentation.",
)
client.create_examples(
inputs=[{"input": query} for query in inputs],
outputs=[{"output": answer} for answer in outputs],
dataset_id=dataset.id,
)
from langchain import hub
from langchain.agents import AgentExecutor, AgentType, initialize_agent, load_tools
from langchain_openai import ChatOpenAI
def create_agent(prompt, llm_with_tools):
runnable_agent = (
{
"input": lambda x: x["input"],
"agent_scratchpad": lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
),
}
| prompt
| llm_with_tools
| OpenAIToolsAgentOutputParser()
)
return AgentExecutor(agent=runnable_agent, tools=tools, handle_parsing_errors=True)
from langsmith.evaluation import EvaluationResult
from langsmith.schemas import Example, Run
def check_not_idk(run: Run, example: Example):
"""Illustration of a custom evaluator."""
agent_response = run.outputs["output"]
if "don't know" in agent_response or "not sure" in agent_response:
score = 0
else:
score = 1
return EvaluationResult(
key="not_uncertain",
score=score,
)
from typing import List
def max_pred_length(runs: List[Run], examples: List[Example]):
predictions = [len(run.outputs["output"]) for run in runs]
return EvaluationResult(key="max_pred_length", score=max(predictions))
from langchain.evaluation import EvaluatorType
from langchain.smith import RunEvalConfig
evaluation_config = RunEvalConfig(
evaluators=[
check_not_idk,
EvaluatorType.QA,
EvaluatorType.EMBEDDING_DISTANCE,
        RunEvalConfig.LabeledCriteria("helpfulness"),  # api: langchain.smith.RunEvalConfig.LabeledCriteria
    ]
)
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
history = StreamlitChatMessageHistory(key="chat_messages")  # api: langchain_community.chat_message_histories.StreamlitChatMessageHistory
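# Hedged usage sketch: StreamlitChatMessageHistory implements the standard chat
# message history interface, so messages can be appended and read back.
history.add_user_message("hi!")
history.add_ai_message("whats up?")
history.messages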
import getpass
import os
os.environ["POLYGON_API_KEY"] = getpass.getpass()
from langchain_community.tools.polygon.financials import PolygonFinancials
from langchain_community.tools.polygon.last_quote import PolygonLastQuote
from langchain_community.tools.polygon.ticker_news import PolygonTickerNews
from langchain_community.utilities.polygon import PolygonAPIWrapper
api_wrapper = PolygonAPIWrapper()  # api: langchain_community.utilities.polygon.PolygonAPIWrapper
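# Hedged usage sketch: each Polygon tool takes the shared API wrapper; the
# ticker symbol is only an example.
last_quote_tool = PolygonLastQuote(api_wrapper=api_wrapper)
print(last_quote_tool.run("AAPL"))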
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
template = """Answer the users question based only on the following context:
<context>
{context}
</context>
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)  # api: langchain_core.prompts.ChatPromptTemplate.from_template
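# Hedged continuation sketch: a minimal chain that fills the context slot with
# DuckDuckGo search results; the exact wiring is an assumption for this
# truncated snippet.
search = DuckDuckGoSearchAPIWrapper()
chain = (
    {"context": lambda question: search.run(question), "question": RunnablePassthrough()}
    | prompt
    | ChatOpenAI(temperature=0)
    | StrOutputParser()
)
chain.invoke("What is LangChain?")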
import os
os.environ["EXA_API_KEY"] = "..."
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-exa')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_exa import ExaSearchRetriever, TextContentsOptions
from langchain_openai import ChatOpenAI
retriever = ExaSearchRetriever(
k=5, text_contents_options=TextContentsOptions(max_length=200)
)
prompt = PromptTemplate.from_template(
"""Answer the following query based on the following context:
query: {query}
<context>
{context}
</context"""
)
llm = ChatOpenAI()
chain = (
RunnableParallel({"context": retriever, "query": RunnablePassthrough()})
| prompt
| llm
)
chain.invoke("When is the best time to visit japan?")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-exa')
from exa_py import Exa
from langchain.agents import tool
exa = Exa(api_key=os.environ["EXA_API_KEY"])
@tool
def search(query: str):
"""Search for a webpage based on the query."""
return exa.search(f"{query}", use_autoprompt=True, num_results=5)
@tool
def find_similar(url: str):
"""Search for webpages similar to a given URL.
The url passed in should be a URL returned from `search`.
"""
return exa.find_similar(url, num_results=5)
@tool
def get_contents(ids: list[str]):
"""Get the contents of a webpage.
The ids passed in should be a list of ids returned from `search`.
"""
return exa.get_contents(ids)
tools = [search, get_contents, find_similar]
from langchain.agents import AgentExecutor, OpenAIFunctionsAgent
from langchain_core.messages import SystemMessage
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0)
system_message = SystemMessage(
content="You are a web researcher who answers user questions by looking up information on the internet and retrieving contents of helpful documents. Cite your sources."
)
agent_prompt = OpenAIFunctionsAgent.create_prompt(system_message)  # api: langchain.agents.OpenAIFunctionsAgent.create_prompt
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
response_schemas = [
ResponseSchema(name="answer", description="answer to the user's question"),
ResponseSchema(
name="source",
description="source used to answer the user's question, should be a website.",
),
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)  # api: langchain.output_parsers.StructuredOutputParser.from_response_schemas
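# Hedged continuation sketch: injecting the parser's format instructions into a
# prompt and parsing the structured answer; the question is only an example.
qa_prompt = PromptTemplate(
    template="Answer the user's question as best as possible.\n{format_instructions}\n{question}",
    input_variables=["question"],
    partial_variables={"format_instructions": output_parser.get_format_instructions()},
)
chain = qa_prompt | ChatOpenAI(temperature=0) | output_parser
chain.invoke({"question": "What is the capital of France?"})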
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('poetry run pip install replicate')
from getpass import getpass
REPLICATE_API_TOKEN = getpass()
import os
os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import Replicate
llm = Replicate(
model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
model_kwargs={"temperature": 0.75, "max_length": 500, "top_p": 1},
)
prompt = """
User: Answer the following yes/no question by reasoning step by step. Can a dog drive a car?
Assistant:
"""
llm(prompt)
llm = Replicate(  # api: langchain_community.llms.Replicate
    model="replicate/dolly-v2-12b:ef0e1aefc61f8e096ebe4db6b2bacc297daf2ef6899f0f7e001ec445893500e5"
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet cohere')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet faiss-cpu')
import getpass
import os
os.environ["COHERE_API_KEY"] = getpass.getpass("Cohere API Key:")
def pretty_print_docs(docs):
print(
f"\n{'-' * 100}\n".join(
[f"Document {i+1}:\n\n" + d.page_content for i, d in enumerate(docs)]
)
)
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import CohereEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_text_splitters import RecursiveCharacterTextSplitter
documents = TextLoader("../../modules/state_of_the_union.txt")  # api: langchain_community.document_loaders.TextLoader
from typing import List
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field, validator
from langchain_openai import ChatOpenAI
model = ChatOpenAI(temperature=0)
class Joke(BaseModel):
setup: str = Field(description="question to set up a joke")
punchline: str = Field(description="answer to resolve the joke")
@validator("setup")
def question_ends_with_question_mark(cls, field):
if field[-1] != "?":
raise ValueError("Badly formed question!")
return field
joke_query = "Tell me a joke."
parser = PydanticOutputParser(pydantic_object=Joke)  # api: langchain.output_parsers.PydanticOutputParser
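# Hedged continuation sketch: pairing the Pydantic parser with a prompt and the
# chat model defined above, then parsing the completion into the Joke model.
joke_prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
chain = joke_prompt | model | parser
chain.invoke({"query": joke_query})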
get_ipython().system(' pip install langchain docugami==0.0.8 dgml-utils==0.3.0 pydantic langchainhub chromadb hnswlib --upgrade --quiet')
from pprint import pprint
from docugami import Docugami
from docugami.lib.upload import upload_to_named_docset, wait_for_dgml
DOCSET_NAME = "NTSB Aviation Incident Reports"
FILE_PATHS = [
"/Users/tjaffri/ntsb/Report_CEN23LA277_192541.pdf",
"/Users/tjaffri/ntsb/Report_CEN23LA338_192753.pdf",
"/Users/tjaffri/ntsb/Report_CEN23LA363_192876.pdf",
"/Users/tjaffri/ntsb/Report_CEN23LA394_192995.pdf",
"/Users/tjaffri/ntsb/Report_ERA23LA114_106615.pdf",
"/Users/tjaffri/ntsb/Report_WPR23LA254_192532.pdf",
]
assert len(FILE_PATHS) > 5, "Please provide at least 6 files"
dg_client = Docugami()
dg_docs = upload_to_named_docset(dg_client, FILE_PATHS, DOCSET_NAME)
dgml_paths = wait_for_dgml(dg_client, dg_docs)
pprint(dgml_paths)
from pathlib import Path
from dgml_utils.segmentation import get_chunks_str
dgml_path = dgml_paths[Path(FILE_PATHS[0]).name]
with open(dgml_path, "r") as file:
contents = file.read().encode("utf-8")
chunks = get_chunks_str(
contents,
include_xml_tags=True, # Ensures Docugami XML semantic tags are included in the chunked output (set to False for text-only chunks and tables as Markdown)
max_text_length=1024 * 8, # 8k chars are ~2k tokens for OpenAI.
)
print(f"found {len(chunks)} chunks, here are the first few")
for chunk in chunks[:10]:
print(chunk.text)
with open(dgml_path, "r") as file:
contents = file.read().encode("utf-8")
chunks = get_chunks_str(
contents,
include_xml_tags=False, # text-only chunks and tables as Markdown
max_text_length=1024
* 8, # 8k chars are ~2k tokens for OpenAI. Ref: https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
)
print(f"found {len(chunks)} chunks, here are the first few")
for chunk in chunks[:10]:
print(chunk.text)
import requests
dgml = requests.get(
"https://raw.githubusercontent.com/docugami/dgml-utils/main/python/tests/test_data/article/Jane%20Doe.xml"
).text
chunks = get_chunks_str(dgml, include_xml_tags=True)
len(chunks)
category_counts = {}
for element in chunks:
category = element.structure
if category in category_counts:
category_counts[category] += 1
else:
category_counts[category] = 1
category_counts
table_elements = [c for c in chunks if "table" in c.structure.split()]
print(f"There are {len(table_elements)} tables")
text_elements = [c for c in chunks if "table" not in c.structure.split()]
print(f"There are {len(text_elements)} text elements")
for element in text_elements[:20]:
print(element.text)
print(table_elements[0].text)
chunks_as_text = get_chunks_str(dgml, include_xml_tags=False)
table_elements_as_text = [c for c in chunks_as_text if "table" in c.structure.split()]
print(table_elements_as_text[0].text)
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
prompt_text = """You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOpenAI(temperature=0, model="gpt-4")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
tables = [i.text for i in table_elements]
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
import uuid
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.vectorstores.chroma import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
def build_retriever(text_elements, tables, table_summaries):
vectorstore = Chroma(
collection_name="summaries", embedding_function=OpenAIEmbeddings()
)
store = InMemoryStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=store,
id_key=id_key,
)
texts = [i.text for i in text_elements]
doc_ids = [str(uuid.uuid4()) for _ in texts]
retriever.docstore.mset(list(zip(doc_ids, texts)))
table_ids = [str(uuid.uuid4()) for _ in tables]
summary_tables = [
Document(page_content=s, metadata={id_key: table_ids[i]})
for i, s in enumerate(table_summaries)
]
retriever.vectorstore.add_documents(summary_tables)
retriever.docstore.mset(list(zip(table_ids, tables)))
return retriever
retriever = build_retriever(text_elements, tables, table_summaries)
from langchain_core.runnables import RunnablePassthrough
system_prompt = SystemMessagePromptTemplate.from_template(
"You are a helpful assistant that answers questions based on provided context. Your provided context can include text or tables, "
"and may also contain semantic XML markup. Pay attention the semantic XML markup to understand more about the context semantics as "
"well as structure (e.g. lists and tabular layouts expressed with HTML-like tags)"
)
human_prompt = HumanMessagePromptTemplate.from_template(
"""Context:
{context}
Question: {question}"""
)
def build_chain(retriever, model):
    prompt = ChatPromptTemplate.from_messages([system_prompt, human_prompt])  # api: langchain.prompts.ChatPromptTemplate.from_messages
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain nuclia')
from langchain_community.vectorstores.nucliadb import NucliaDB
API_KEY = "YOUR_API_KEY"
ndb = NucliaDB(knowledge_box="YOUR_KB_ID", local=False, api_key=API_KEY)
from langchain_community.vectorstores.nucliadb import NucliaDB
ndb = NucliaDB(knowledge_box="YOUR_KB_ID", local=True, backend="http://my-local-server")  # api: langchain_community.vectorstores.nucliadb.NucliaDB
get_ipython().run_line_magic('pip', 'install --upgrade --quiet jsonformer > /dev/null')
import logging
logging.basicConfig(level=logging.ERROR)
import json
import os
import requests
from langchain.tools import tool
HF_TOKEN = os.environ.get("HUGGINGFACE_API_KEY")
@tool
def ask_star_coder(query: str, temperature: float = 1.0, max_new_tokens: float = 250):
"""Query the BigCode StarCoder model about coding questions."""
url = "https://api-inference.huggingface.co/models/bigcode/starcoder"
headers = {
"Authorization": f"Bearer {HF_TOKEN}",
"content-type": "application/json",
}
payload = {
"inputs": f"{query}\n\nAnswer:",
"temperature": temperature,
"max_new_tokens": int(max_new_tokens),
}
response = requests.post(url, headers=headers, data=json.dumps(payload))
response.raise_for_status()
return json.loads(response.content.decode("utf-8"))
prompt = """You must respond using JSON format, with a single action and single action input.
You may 'ask_star_coder' for help on coding problems.
{arg_schema}
EXAMPLES
----
Human: "So what's all this about a GIL?"
AI Assistant:{{
"action": "ask_star_coder",
"action_input": {{"query": "What is a GIL?", "temperature": 0.0, "max_new_tokens": 100}}"
}}
Observation: "The GIL is python's Global Interpreter Lock"
Human: "Could you please write a calculator program in LISP?"
AI Assistant:{{
"action": "ask_star_coder",
"action_input": {{"query": "Write a calculator program in LISP", "temperature": 0.0, "max_new_tokens": 250}}
}}
Observation: "(defun add (x y) (+ x y))\n(defun sub (x y) (- x y ))"
Human: "What's the difference between an SVM and an LLM?"
AI Assistant:{{
"action": "ask_star_coder",
"action_input": {{"query": "What's the difference between SGD and an SVM?", "temperature": 1.0, "max_new_tokens": 250}}
}}
Observation: "SGD stands for stochastic gradient descent, while an SVM is a Support Vector Machine."
BEGIN! Answer the Human's question as best as you are able.
------
Human: 'What's the difference between an iterator and an iterable?'
AI Assistant:""".format(arg_schema=ask_star_coder.args)
from langchain_community.llms import HuggingFacePipeline
from transformers import pipeline
hf_model = pipeline(
"text-generation", model="cerebras/Cerebras-GPT-590M", max_new_tokens=200
)
original_model = HuggingFacePipeline(pipeline=hf_model)
generated = original_model.predict(prompt, stop=["Observation:", "Human:"])
print(generated)
decoder_schema = {
"title": "Decoding Schema",
"type": "object",
"properties": {
"action": {"type": "string", "default": ask_star_coder.name},
"action_input": {
"type": "object",
"properties": ask_star_coder.args,
},
},
}
from langchain_experimental.llms import JsonFormer
json_former = JsonFormer(json_schema=decoder_schema, pipeline=hf_model)  # api: langchain_experimental.llms.JsonFormer
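# Hedged usage sketch: decoding the same prompt with the schema-constrained
# JsonFormer wrapper defined above.
results = json_former.predict(prompt, stop=["Observation:", "Human:"])
print(results)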
SOURCE = "test" # @param {type:"Query"|"CollectionGroup"|"DocumentReference"|"string"}
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-datastore')
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
from google.colab import auth
auth.authenticate_user()
get_ipython().system('gcloud services enable datastore.googleapis.com')
from langchain_core.documents import Document
from langchain_google_datastore import DatastoreSaver
data = [Document(page_content="Hello, World!")]
saver = DatastoreSaver()
saver.upsert_documents(data)
saver = DatastoreSaver("Collection")
saver.upsert_documents(data)
doc_ids = ["AnotherCollection/doc_id", "foo/bar"]
saver = DatastoreSaver()
saver.upsert_documents(documents=data, document_ids=doc_ids)
from langchain_google_datastore import DatastoreLoader
loader_collection = DatastoreLoader("Collection")
loader_subcollection = DatastoreLoader("Collection/doc/SubCollection")
data_collection = loader_collection.load()
data_subcollection = loader_subcollection.load()
from google.cloud import datastore
client = datastore.Client()
doc_ref = client.collection("foo").document("bar")
loader_document = DatastoreLoader(doc_ref)
data = loader_document.load()
from google.cloud.datastore import CollectionGroup, FieldFilter, Query
col_ref = client.collection("col_group")
collection_group = CollectionGroup(col_ref)
loader_group = DatastoreLoader(collection_group)
col_ref = client.collection("collection")
query = col_ref.where(filter=FieldFilter("region", "==", "west_coast"))
loader_query = DatastoreLoader(query)
saver = DatastoreSaver()  # api: langchain_google_datastore.DatastoreSaver
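# Hedged usage sketch: a saver can also delete previously written documents
# (the method name is assumed from the langchain-google-datastore saver interface).
saver.delete_documents(data)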
import os
import yaml
get_ipython().system('wget https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml -O openai_openapi.yaml')
get_ipython().system('wget https://www.klarna.com/us/shopping/public/openai/v0/api-docs -O klarna_openapi.yaml')
get_ipython().system('wget https://raw.githubusercontent.com/APIs-guru/openapi-directory/main/APIs/spotify.com/1.0.0/openapi.yaml -O spotify_openapi.yaml')
from langchain_community.agent_toolkits.openapi.spec import reduce_openapi_spec
with open("openai_openapi.yaml") as f:
raw_openai_api_spec = yaml.load(f, Loader=yaml.Loader)
openai_api_spec = reduce_openapi_spec(raw_openai_api_spec)  # api: langchain_community.agent_toolkits.openapi.spec.reduce_openapi_spec
get_ipython().system('pip3 install oracle-ads')
import ads
from langchain_community.llms import OCIModelDeploymentVLLM
ads.set_auth("resource_principal")
llm = OCIModelDeploymentVLLM(endpoint="https://<MD_OCID>/predict", model="model_name")  # api: langchain_community.llms.OCIModelDeploymentVLLM
import re
from typing import Union
from langchain.agents import (
AgentExecutor,
AgentOutputParser,
LLMSingleActionAgent,
)
from langchain.chains import LLMChain
from langchain.prompts import StringPromptTemplate
from langchain_community.agent_toolkits import NLAToolkit
from langchain_community.tools.plugin import AIPlugin
from langchain_core.agents import AgentAction, AgentFinish
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
urls = [
"https://datasette.io/.well-known/ai-plugin.json",
"https://api.speak.com/.well-known/ai-plugin.json",
"https://www.wolframalpha.com/.well-known/ai-plugin.json",
"https://www.zapier.com/.well-known/ai-plugin.json",
"https://www.klarna.com/.well-known/ai-plugin.json",
"https://www.joinmilo.com/.well-known/ai-plugin.json",
"https://slack.com/.well-known/ai-plugin.json",
"https://schooldigger.com/.well-known/ai-plugin.json",
]
AI_PLUGINS = [AIPlugin.from_url(url) for url in urls]
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
docs = [
Document(
page_content=plugin.description_for_model,
metadata={"plugin_name": plugin.name_for_model},
)
for plugin in AI_PLUGINS
]
vector_store = FAISS.from_documents(docs, embeddings)
toolkits_dict = {
    plugin.name_for_model: NLAToolkit.from_llm_and_ai_plugin(llm, plugin)  # api: langchain_community.agent_toolkits.NLAToolkit.from_llm_and_ai_plugin
    for plugin in AI_PLUGINS
}
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
import os
import uuid
uid = uuid.uuid4().hex[:6]
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = "YOUR API KEY"
from langsmith.client import Client
client = Client()
import requests
url = "https://raw.githubusercontent.com/langchain-ai/langchain/master/docs/docs/integrations/chat_loaders/example_data/langsmith_chat_dataset.json"
response = requests.get(url)
response.raise_for_status()
data = response.json()
dataset_name = f"Extraction Fine-tuning Dataset {uid}"
ds = client.create_dataset(dataset_name=dataset_name, data_type="chat")
_ = client.create_examples(
inputs=[e["inputs"] for e in data],
outputs=[e["outputs"] for e in data],
dataset_id=ds.id,
)
from langchain_community.chat_loaders.langsmith import LangSmithDatasetChatLoader
loader = LangSmithDatasetChatLoader(dataset_name=dataset_name)
chat_sessions = loader.lazy_load()
from langchain.adapters.openai import convert_messages_for_finetuning
training_data = convert_messages_for_finetuning(chat_sessions)  # api: langchain.adapters.openai.convert_messages_for_finetuning
get_ipython().run_line_magic('pip', 'install --upgrade --quiet playwright beautifulsoup4')
get_ipython().system(' playwright install')
from langchain_community.document_loaders import AsyncChromiumLoader
urls = ["https://www.wsj.com"]
loader = AsyncChromiumLoader(urls)
docs = loader.load()
docs[0].page_content[0:100]
from langchain_community.document_transformers import Html2TextTransformer
html2text = Html2TextTransformer()  # api: langchain_community.document_transformers.Html2TextTransformer
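# Hedged usage sketch: converting the scraped HTML documents to plain text.
docs_transformed = html2text.transform_documents(docs)
docs_transformed[0].page_content[0:100]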
get_ipython().run_line_magic('pip', 'install --upgrade --quiet wikipedia')
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_openai import OpenAI
api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)  # api: langchain_community.utilities.WikipediaAPIWrapper
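# Hedged continuation sketch: wrapping the API wrapper in the WikipediaQueryRun
# tool and assembling a ReAct agent with the imports above.
tool = WikipediaQueryRun(api_wrapper=api_wrapper)
prompt = hub.pull("hwchase17/react")
agent = create_react_agent(OpenAI(temperature=0), [tool], prompt)
agent_executor = AgentExecutor(agent=agent, tools=[tool], verbose=True)
agent_executor.invoke({"input": "How many people live in Canada?"})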
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai argilla')
import os
os.environ["ARGILLA_API_URL"] = "..."
os.environ["ARGILLA_API_KEY"] = "..."
os.environ["OPENAI_API_KEY"] = "..."
import argilla as rg
from packaging.version import parse as parse_version
if parse_version(rg.__version__) < parse_version("1.8.0"):
raise RuntimeError(
"`FeedbackDataset` is only available in Argilla v1.8.0 or higher, please "
"upgrade `argilla` as `pip install argilla --upgrade`."
)
dataset = rg.FeedbackDataset(
fields=[
rg.TextField(name="prompt"),
rg.TextField(name="response"),
],
questions=[
rg.RatingQuestion(
name="response-rating",
description="How would you rate the quality of the response?",
values=[1, 2, 3, 4, 5],
required=True,
),
rg.TextQuestion(
name="response-feedback",
description="What feedback do you have for the response?",
required=False,
),
],
guidelines="You're asked to rate the quality of the response and provide feedback.",
)
rg.init(
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
dataset.push_to_argilla("langchain-dataset")
from langchain.callbacks import ArgillaCallbackHandler
argilla_callback = ArgillaCallbackHandler(
dataset_name="langchain-dataset",
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler
from langchain_openai import OpenAI
argilla_callback = ArgillaCallbackHandler(
dataset_name="langchain-dataset",
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
callbacks = [StdOutCallbackHandler(), argilla_callback]
llm = OpenAI(temperature=0.9, callbacks=callbacks)
llm.generate(["Tell me a joke", "Tell me a poem"] * 3)
from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
argilla_callback = ArgillaCallbackHandler(
dataset_name="langchain-dataset",
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
callbacks = [StdOutCallbackHandler(), argilla_callback]
llm = OpenAI(temperature=0.9, callbacks=callbacks)
template = """You are a playwright. Given the title of play, it is your job to write a synopsis for that title.
Title: {title}
Playwright: This is a synopsis for the above play:"""
prompt_template = PromptTemplate(input_variables=["title"], template=template)
synopsis_chain = LLMChain(llm=llm, prompt=prompt_template, callbacks=callbacks)
test_prompts = [{"title": "Documentary about Bigfoot in Paris"}]
synopsis_chain.apply(test_prompts)
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler
from langchain_openai import OpenAI
argilla_callback = ArgillaCallbackHandler(
dataset_name="langchain-dataset",
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
callbacks = [StdOutCallbackHandler(), argilla_callback]  # api: langchain.callbacks.StdOutCallbackHandler
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass()
from langchain_core.tools import tool
@tool
def complex_tool(int_arg: int, float_arg: float, dict_arg: dict) -> int:
"""Do something complex with a complex tool."""
return int_arg * float_arg
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
model_with_tools = model.bind_tools(
[complex_tool],
tool_choice="complex_tool",
)
from operator import itemgetter
from langchain.output_parsers import JsonOutputKeyToolsParser
from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough
chain = (
model_with_tools
| JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)
| complex_tool
)
chain.invoke(
"use complex tool. the args are 5, 2.1, empty dictionary. don't forget dict_arg"
)
from typing import Any
from langchain_core.runnables import RunnableConfig
def try_except_tool(tool_args: dict, config: RunnableConfig) -> Runnable:
try:
complex_tool.invoke(tool_args, config=config)
except Exception as e:
return f"Calling tool with arguments:\n\n{tool_args}\n\nraised the following error:\n\n{type(e)}: {e}"
chain = (
model_with_tools
    | JsonOutputKeyToolsParser(key_name="complex_tool", return_single=True)  # api: langchain.output_parsers.JsonOutputKeyToolsParser
    | try_except_tool
)
from langchain.prompts import PromptTemplate
prompt = (
PromptTemplate.from_template("Tell me a joke about {topic}")
+ ", make it funny"
+ "\n\nand in {language}"
)
prompt
prompt.format(topic="sports", language="spanish")
from langchain.chains import LLMChain
from langchain_openai import ChatOpenAI
model = ChatOpenAI()
chain = LLMChain(llm=model, prompt=prompt)
chain.run(topic="sports", language="spanish")
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
prompt = SystemMessage(content="You are a nice pirate")
new_prompt = (
    prompt + HumanMessage(content="hi")  # api: langchain_core.messages.HumanMessage
)
from langchain.indexes import SQLRecordManager, index
from langchain_core.documents import Document
from langchain_elasticsearch import ElasticsearchStore
from langchain_openai import OpenAIEmbeddings
collection_name = "test_index"
embedding = OpenAIEmbeddings()
vectorstore = ElasticsearchStore(
es_url="http://localhost:9200", index_name="test_index", embedding=embedding
)
namespace = f"elasticsearch/{collection_name}"
record_manager = SQLRecordManager(
namespace, db_url="sqlite:///record_manager_cache.sql"
)
record_manager.create_schema()
doc1 = Document(page_content="kitty", metadata={"source": "kitty.txt"})
doc2 = Document(page_content="doggy", metadata={"source": "doggy.txt"})
def _clear():
"""Hacky helper method to clear content. See the `full` mode section to to understand why it works."""
index([], record_manager, vectorstore, cleanup="full", source_id_key="source")
_clear()
index(
[doc1, doc1, doc1, doc1, doc1],
record_manager,
vectorstore,
cleanup=None,
source_id_key="source",
)
_clear()
index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source")
index([doc1, doc2], record_manager, vectorstore, cleanup=None, source_id_key="source")
_clear()
index(
[doc1, doc2],
record_manager,
vectorstore,
cleanup="incremental",
source_id_key="source",
)
index(
[doc1, doc2],
record_manager,
vectorstore,
cleanup="incremental",
source_id_key="source",
)
index([], record_manager, vectorstore, cleanup="incremental", source_id_key="source")  # api: langchain.indexes.index
from langchain_community.document_loaders.generic import GenericLoader
from langchain_community.document_loaders.parsers import GrobidParser
loader = GenericLoader.from_filesystem(
"../Papers/",
glob="*",
suffixes=[".pdf"],
    parser=GrobidParser(segment_sentences=False),  # api: langchain_community.document_loaders.parsers.GrobidParser
)
get_ipython().run_line_magic('pip', 'install --upgrade --quiet boto3 nltk')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain_experimental')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain pydantic')
import os
import boto3
comprehend_client = boto3.client("comprehend", region_name="us-east-1")
from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain
comprehend_moderation = AmazonComprehendModerationChain(
client=comprehend_client,
verbose=True, # optional
)
from langchain.prompts import PromptTemplate
from langchain_community.llms.fake import FakeListLLM
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
template = """Question: {question}
Answer:"""
prompt = PromptTemplate.from_template(template)
responses = [
"Final Answer: A credit card number looks like 1289-2321-1123-2387. A fake SSN number looks like 323-22-9980. John Doe's phone number is (999)253-9876.",
"Final Answer: This is a really <expletive> way of constructing a birdhouse. This is <expletive> insane to think that any birds would actually create their <expletive> nests here.",
]
llm = FakeListLLM(responses=responses)
chain = (
prompt
| comprehend_moderation
| {"input": (lambda x: x["output"]) | llm}
| comprehend_moderation
)
try:
response = chain.invoke(
{
"question": "A sample SSN number looks like this 123-22-3345. Can you give me some more samples?"
}
)
except ModerationPiiError as e:
print(str(e))
else:
print(response["output"])
from langchain_experimental.comprehend_moderation import (
BaseModerationConfig,
ModerationPiiConfig,
ModerationPromptSafetyConfig,
ModerationToxicityConfig,
)
pii_config = ModerationPiiConfig(labels=["SSN"], redact=True, mask_character="X")
toxicity_config = ModerationToxicityConfig(threshold=0.5)
prompt_safety_config = ModerationPromptSafetyConfig(threshold=0.5)  # api: langchain_experimental.comprehend_moderation.ModerationPromptSafetyConfig
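# Hedged continuation sketch: combining the three filter configs into a
# BaseModerationConfig and a configured moderation chain.
moderation_config = BaseModerationConfig(
    filters=[pii_config, toxicity_config, prompt_safety_config]
)
comp_moderation_with_config = AmazonComprehendModerationChain(
    moderation_config=moderation_config,
    client=comprehend_client,
    verbose=True,
)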
from getpass import getpass
MOSAICML_API_TOKEN = getpass()
import os
os.environ["MOSAICML_API_TOKEN"] = MOSAICML_API_TOKEN
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import MosaicML
template = """Question: {question}"""
prompt = PromptTemplate.from_template(template)
llm = MosaicML(inject_instruction_format=True, model_kwargs={"max_new_tokens": 128})  # api: langchain_community.llms.MosaicML
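# Hedged continuation sketch: wiring the prompt and the MosaicML LLM into the
# LLMChain imported above.
llm_chain = LLMChain(prompt=prompt, llm=llm)
llm_chain.run("What is one good reason to train a domain-specific language model?")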
meals = [
"Beef Enchiladas with Feta cheese. Mexican-Greek fusion",
"Chicken Flatbreads with red sauce. Italian-Mexican fusion",
"Veggie sweet potato quesadillas with vegan cheese",
"One-Pan Tortelonni bake with peppers and onions",
]
from langchain_openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-instruct")
from langchain.prompts import PromptTemplate
PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}".
Embed the meal into the given text: "{text_to_personalize}".
Prepend a personalized message including the user's name "{user}"
and their preference "{preference}".
Make it sound good.
"""
PROMPT = PromptTemplate(
input_variables=["meal", "text_to_personalize", "user", "preference"],
template=PROMPT_TEMPLATE,
)
import langchain_experimental.rl_chain as rl_chain
chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs \
believe you will love it!",
)
print(response["response"])
for _ in range(5):
try:
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
except Exception as e:
print(e)
print(response["response"])
print()
scoring_criteria_template = (
"Given {preference} rank how good or bad this selection is {meal}"
)
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=rl_chain.AutoSelectionScorer(
llm=llm, scoring_criteria_template_str=scoring_criteria_template
),
)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
print(response["response"])
selection_metadata = response["selection_metadata"]
print(
f"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}"
)
class CustomSelectionScorer(rl_chain.SelectionScorer):
def score_response(
self, inputs, llm_response: str, event: rl_chain.PickBestEvent
) -> float:
print(event.based_on)
print(event.to_select_from)
selected_meal = event.to_select_from["meal"][event.selected.index]
print(f"selected meal: {selected_meal}")
if "Tom" in event.based_on["user"]:
if "Vegetarian" in event.based_on["preference"]:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 0.0
else:
return 1.0
else:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 1.0
else:
return 0.0
else:
raise NotImplementedError("I don't know how to score this user")
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=CustomSelectionScorer(),
)
response = chain.run(
meal= | rl_chain.ToSelectFrom(meals) | langchain_experimental.rl_chain.ToSelectFrom |
get_ipython().system('poetry run pip install dgml-utils==0.3.0 --upgrade --quiet')
import os
from langchain_community.document_loaders import DocugamiLoader
DOCUGAMI_API_KEY = os.environ.get("DOCUGAMI_API_KEY")
docset_id = "26xpy3aes7xp"
document_ids = ["d7jqdzcj50sj", "cgd1eacfkchw"]
loader = DocugamiLoader(docset_id=docset_id, document_ids=document_ids)
chunks = loader.load()
len(chunks)
loader.min_text_length = 64
loader.include_xml_tags = True
chunks = loader.load()
for chunk in chunks[:5]:
print(chunk)
get_ipython().system('poetry run pip install --upgrade langchain-openai tiktoken chromadb hnswlib')
loader = DocugamiLoader(docset_id="zo954yqy53wp")
chunks = loader.load()
for chunk in chunks:
stripped_metadata = chunk.metadata.copy()
for key in chunk.metadata:
if key not in ["name", "xpath", "id", "structure"]:
del stripped_metadata[key]
chunk.metadata = stripped_metadata
print(len(chunks))
from langchain.chains import RetrievalQA
from langchain_community.vectorstores.chroma import Chroma
from langchain_openai import OpenAI, OpenAIEmbeddings
embedding = OpenAIEmbeddings()
vectordb = Chroma.from_documents(documents=chunks, embedding=embedding)
retriever = vectordb.as_retriever()
qa_chain = RetrievalQA.from_chain_type(
llm=OpenAI(), chain_type="stuff", retriever=retriever, return_source_documents=True
)
qa_chain("What can tenants do with signage on their properties?")
chain_response = qa_chain("What is rentable area for the property owned by DHA Group?")
chain_response["result"] # correct answer should be 13,500 sq ft
chain_response["source_documents"]
loader = DocugamiLoader(docset_id="zo954yqy53wp")
loader.include_xml_tags = (
True # for additional semantics from the Docugami knowledge graph
)
chunks = loader.load()
print(chunks[0].metadata)
get_ipython().system('poetry run pip install --upgrade lark --quiet')
from langchain.chains.query_constructor.schema import AttributeInfo
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain_community.vectorstores.chroma import Chroma
EXCLUDE_KEYS = ["id", "xpath", "structure"]
metadata_field_info = [
AttributeInfo(
name=key,
description=f"The {key} for this chunk",
type="string",
)
for key in chunks[0].metadata
if key.lower() not in EXCLUDE_KEYS
]
document_content_description = "Contents of this chunk"
llm = OpenAI(temperature=0)
vectordb = Chroma.from_documents(documents=chunks, embedding=embedding)
retriever = SelfQueryRetriever.from_llm(
llm, vectordb, document_content_description, metadata_field_info, verbose=True
)
qa_chain = RetrievalQA.from_chain_type(
llm=OpenAI(),
chain_type="stuff",
retriever=retriever,
return_source_documents=True,
verbose=True,
)
qa_chain(
"What is rentable area for the property owned by DHA Group?"
) # correct answer should be 13,500 sq ft
from typing import Dict, List
from langchain_community.document_loaders import DocugamiLoader
from langchain_core.documents import Document
loader = DocugamiLoader(docset_id="zo954yqy53wp")
loader.include_xml_tags = (
True # for additional semantics from the Docugami knowledge graph
)
loader.parent_hierarchy_levels = 3 # for expanded context
loader.max_text_length = (
1024 * 8
) # 8K chars are roughly 2K tokens (ref: https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them)
loader.include_project_metadata_in_doc_metadata = (
False # Not filtering on vector metadata, so remove to lighten the vectors
)
chunks: List[Document] = loader.load()
parents_by_id: Dict[str, Document] = {}
children_by_id: Dict[str, Document] = {}
for chunk in chunks:
chunk_id = chunk.metadata.get("id")
parent_chunk_id = chunk.metadata.get(loader.parent_id_key)
if not parent_chunk_id:
parents_by_id[chunk_id] = chunk
else:
children_by_id[chunk_id] = chunk
for id, chunk in list(children_by_id.items())[:5]:
parent_chunk_id = chunk.metadata.get(loader.parent_id_key)
if parent_chunk_id:
print(f"PARENT CHUNK {parent_chunk_id}: {parents_by_id[parent_chunk_id]}")
print(f"CHUNK {id}: {chunk}")
from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType
from langchain.storage import InMemoryStore
from langchain_community.vectorstores.chroma import Chroma
from langchain_openai import OpenAIEmbeddings
vectorstore = Chroma(collection_name="big2small", embedding_function=OpenAIEmbeddings())
store = | InMemoryStore() | langchain.storage.InMemoryStore |
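With the vectorstore and docstore in place, a multi-vector retriever could be wired up roughly as follows (a sketch; the id key is assumed to be the parent id key used when splitting parents and children above):
retriever = MultiVectorRetriever(
    vectorstore=vectorstore,
    docstore=store,
    id_key=loader.parent_id_key,  # assumed: children reference parents via this metadata key
    search_type=SearchType.mmr,
)
retriever.vectorstore.add_documents(list(children_by_id.values()))
retriever.docstore.mset(list(parents_by_id.items()))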
import os
import yaml
get_ipython().system('wget https://raw.githubusercontent.com/openai/openai-openapi/master/openapi.yaml -O openai_openapi.yaml')
get_ipython().system('wget https://www.klarna.com/us/shopping/public/openai/v0/api-docs -O klarna_openapi.yaml')
get_ipython().system('wget https://raw.githubusercontent.com/APIs-guru/openapi-directory/main/APIs/spotify.com/1.0.0/openapi.yaml -O spotify_openapi.yaml')
from langchain_community.agent_toolkits.openapi.spec import reduce_openapi_spec
with open("openai_openapi.yaml") as f:
raw_openai_api_spec = yaml.load(f, Loader=yaml.Loader)
openai_api_spec = reduce_openapi_spec(raw_openai_api_spec)
with open("klarna_openapi.yaml") as f:
raw_klarna_api_spec = yaml.load(f, Loader=yaml.Loader)
klarna_api_spec = | reduce_openapi_spec(raw_klarna_api_spec) | langchain_community.agent_toolkits.openapi.spec.reduce_openapi_spec |
from langchain.evaluation import load_evaluator
evaluator = | load_evaluator("criteria", criteria="conciseness") | langchain.evaluation.load_evaluator |
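Usage would look roughly like this (a sketch; the prediction and input strings are invented for illustration):
eval_result = evaluator.evaluate_strings(
    prediction="What's 2+2? That's an elementary question. The answer you're looking for is that two and two is four.",
    input="What's 2+2?",
)
print(eval_result)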
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken langchain-openai python-dotenv datasets langchain deeplake beautifulsoup4 html2text ragas')
ORG_ID = "..."
import getpass
import os
from langchain.chains import RetrievalQA
from langchain.vectorstores.deeplake import DeepLake
from langchain_openai import OpenAIChat, OpenAIEmbeddings
os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API token: ")
os.environ["ACTIVELOOP_TOKEN"] = getpass.getpass(
"Enter your ActiveLoop API token: "
) # Get your API token from https://app.activeloop.ai, click on your profile picture in the top right corner, and select "API Tokens"
token = os.getenv("ACTIVELOOP_TOKEN")
openai_embeddings = OpenAIEmbeddings()
db = DeepLake(
dataset_path=f"hub://{ORG_ID}/deeplake-docs-deepmemory", # org_id stands for your username or organization from activeloop
embedding=openai_embeddings,
runtime={"tensor_db": True},
token=token,
read_only=False,
)
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
def get_all_links(url):
response = requests.get(url)
if response.status_code != 200:
print(f"Failed to retrieve the page: {url}")
return []
soup = BeautifulSoup(response.content, "html.parser")
links = [
urljoin(url, a["href"]) for a in soup.find_all("a", href=True) if a["href"]
]
return links
base_url = "https://docs.deeplake.ai/en/latest/"
all_links = get_all_links(base_url)
from langchain.document_loaders import AsyncHtmlLoader
loader = | AsyncHtmlLoader(all_links) | langchain.document_loaders.AsyncHtmlLoader |
from langchain_community.chat_models.llama_edge import LlamaEdgeChatService
from langchain_core.messages import HumanMessage, SystemMessage
service_url = "https://b008-54-186-154-209.ngrok-free.app"
chat = | LlamaEdgeChatService(service_url=service_url) | langchain_community.chat_models.llama_edge.LlamaEdgeChatService |
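A minimal chat call with the already-imported message classes might look like this (the message contents are assumptions):
system_message = SystemMessage(content="You are an AI chatbot")
user_message = HumanMessage(content="What is the capital of France?")
response = chat.invoke([system_message, user_message])
print(f"[Bot] {response.content}")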
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-experimental langchain-openai neo4j wikipedia')
from langchain_experimental.graph_transformers.diffbot import DiffbotGraphTransformer
diffbot_api_key = "DIFFBOT_API_KEY"
diffbot_nlp = DiffbotGraphTransformer(diffbot_api_key=diffbot_api_key)
from langchain_community.document_loaders import WikipediaLoader
query = "Warren Buffett"
raw_documents = WikipediaLoader(query=query).load()
graph_documents = diffbot_nlp.convert_to_graph_documents(raw_documents)
from langchain_community.graphs import Neo4jGraph
url = "bolt://localhost:7687"
username = "neo4j"
password = "pleaseletmein"
graph = | Neo4jGraph(url=url, username=username, password=password) | langchain_community.graphs.Neo4jGraph |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet semanticscholar')
from langchain import hub
from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_openai import ChatOpenAI
instructions = """You are an expert researcher."""
base_prompt = hub.pull("langchain-ai/openai-functions-template")
prompt = base_prompt.partial(instructions=instructions)
llm = ChatOpenAI(temperature=0)
from langchain_community.tools.semanticscholar.tool import SemanticScholarQueryRun
tools = [SemanticScholarQueryRun()]
agent = | create_openai_functions_agent(llm, tools, prompt) | langchain.agents.create_openai_functions_agent |
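Presumably the agent is then wrapped in an executor and invoked, e.g. (the research question is an assumption):
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
agent_executor.invoke(
    {"input": "What are some biases in large language models? How have people tried to mitigate them?"}
)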
get_ipython().system(' pip install langchain unstructured[all-docs] pydantic lxml')
from typing import Any
from pydantic import BaseModel
from unstructured.partition.pdf import partition_pdf
path = "/Users/rlm/Desktop/Papers/LLaVA/"
raw_pdf_elements = partition_pdf(
filename=path + "LLaVA.pdf",
extract_images_in_pdf=True,
infer_table_structure=True,
chunking_strategy="by_title",
max_characters=4000,
new_after_n_chars=3800,
combine_text_under_n_chars=2000,
image_output_dir_path=path,
)
category_counts = {}
for element in raw_pdf_elements:
category = str(type(element))
if category in category_counts:
category_counts[category] += 1
else:
category_counts[category] = 1
unique_categories = set(category_counts.keys())
category_counts
class Element(BaseModel):
type: str
text: Any
categorized_elements = []
for element in raw_pdf_elements:
if "unstructured.documents.elements.Table" in str(type(element)):
categorized_elements.append(Element(type="table", text=str(element)))
elif "unstructured.documents.elements.CompositeElement" in str(type(element)):
categorized_elements.append(Element(type="text", text=str(element)))
table_elements = [e for e in categorized_elements if e.type == "table"]
print(len(table_elements))
text_elements = [e for e in categorized_elements if e.type == "text"]
print(len(text_elements))
from langchain_community.chat_models import ChatOllama
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
prompt_text = """You are an assistant tasked with summarizing tables and text. \
Give a concise summary of the table or text. Table or text chunk: {element} """
prompt = ChatPromptTemplate.from_template(prompt_text)
model = ChatOllama(model="llama2:13b-chat")
summarize_chain = {"element": lambda x: x} | prompt | model | StrOutputParser()
texts = [i.text for i in text_elements if i.text != ""]
text_summaries = summarize_chain.batch(texts, {"max_concurrency": 5})
tables = [i.text for i in table_elements]
table_summaries = summarize_chain.batch(tables, {"max_concurrency": 5})
get_ipython().run_cell_magic('bash', '', '\n# Define the directory containing the images\nIMG_DIR=~/Desktop/Papers/LLaVA/\n\n# Loop through each image in the directory\nfor img in "${IMG_DIR}"*.jpg; do\n # Extract the base name of the image without extension\n base_name=$(basename "$img" .jpg)\n\n # Define the output file name based on the image name\n output_file="${IMG_DIR}${base_name}.txt"\n\n # Execute the command and save the output to the defined output file\n /Users/rlm/Desktop/Code/llama.cpp/bin/llava -m ../models/llava-7b/ggml-model-q5_k.gguf --mmproj ../models/llava-7b/mmproj-model-f16.gguf --temp 0.1 -p "Describe the image in detail. Be specific about graphs, such as bar plots." --image "$img" > "$output_file"\n\ndone\n')
import glob
import os
file_paths = glob.glob(os.path.expanduser(os.path.join(path, "*.txt")))
img_summaries = []
for file_path in file_paths:
with open(file_path, "r") as file:
img_summaries.append(file.read())
cleaned_img_summary = [
s.split("clip_model_load: total allocated memory: 201.27 MB\n\n", 1)[1].strip()
for s in img_summaries
]
import uuid
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.embeddings import GPT4AllEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
vectorstore = Chroma(
collection_name="summaries", embedding_function= | GPT4AllEmbeddings() | langchain_community.embeddings.GPT4AllEmbeddings |
model_url = "http://localhost:5000"
from langchain.chains import LLMChain
from langchain.globals import set_debug
from langchain.prompts import PromptTemplate
from langchain_community.llms import TextGen
set_debug(True)
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm = TextGen(model_url=model_url)
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
llm_chain.run(question)
model_url = "ws://localhost:5005"
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import LLMChain
from langchain.globals import set_debug
from langchain.prompts import PromptTemplate
from langchain_community.llms import TextGen
set_debug(True)
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
llm = TextGen(
model_url=model_url, streaming=True, callbacks=[ | StreamingStdOutCallbackHandler() | langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler |
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
conversation = ConversationChain(
llm=llm, verbose=True, memory=ConversationBufferMemory()
)
conversation.predict(input="Hi there!")
conversation.predict(input="What's the weather?")
from langchain.prompts.prompt import PromptTemplate
template = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
Current conversation:
{history}
Human: {input}
AI Assistant:"""
PROMPT = PromptTemplate(input_variables=["history", "input"], template=template)
conversation = ConversationChain(
prompt=PROMPT,
llm=llm,
verbose=True,
memory= | ConversationBufferMemory(ai_prefix="AI Assistant") | langchain.memory.ConversationBufferMemory |
arthur_url = "https://app.arthur.ai"
arthur_login = "your-arthur-login-username-here"
arthur_model_id = "your-arthur-model-id-here"
from langchain.callbacks import ArthurCallbackHandler
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
def make_langchain_chat_llm():
return ChatOpenAI(
streaming=True,
temperature=0.1,
callbacks=[
| StreamingStdOutCallbackHandler() | langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler |
meals = [
"Beef Enchiladas with Feta cheese. Mexican-Greek fusion",
"Chicken Flatbreads with red sauce. Italian-Mexican fusion",
"Veggie sweet potato quesadillas with vegan cheese",
"One-Pan Tortelonni bake with peppers and onions",
]
from langchain_openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-instruct")
from langchain.prompts import PromptTemplate
PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}".
Embed the meal into the given text: "{text_to_personalize}".
Prepend a personalized message including the user's name "{user}"
and their preference "{preference}".
Make it sound good.
"""
PROMPT = PromptTemplate(
input_variables=["meal", "text_to_personalize", "user", "preference"],
template=PROMPT_TEMPLATE,
)
import langchain_experimental.rl_chain as rl_chain
chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs \
believe you will love it!",
)
print(response["response"])
for _ in range(5):
try:
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
except Exception as e:
print(e)
print(response["response"])
print()
scoring_criteria_template = (
"Given {preference} rank how good or bad this selection is {meal}"
)
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=rl_chain.AutoSelectionScorer(
llm=llm, scoring_criteria_template_str=scoring_criteria_template
),
)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
print(response["response"])
selection_metadata = response["selection_metadata"]
print(
f"selected index: {selection_metadata.selected.index}, score: {selection_metadata.selected.score}"
)
class CustomSelectionScorer(rl_chain.SelectionScorer):
def score_response(
self, inputs, llm_response: str, event: rl_chain.PickBestEvent
) -> float:
print(event.based_on)
print(event.to_select_from)
selected_meal = event.to_select_from["meal"][event.selected.index]
print(f"selected meal: {selected_meal}")
if "Tom" in event.based_on["user"]:
if "Vegetarian" in event.based_on["preference"]:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 0.0
else:
return 1.0
else:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 1.0
else:
return 0.0
else:
raise NotImplementedError("I don't know how to score this user")
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=CustomSelectionScorer(),
)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
class CustomSelectionScorer(rl_chain.SelectionScorer):
def score_preference(self, preference, selected_meal):
if "Vegetarian" in preference:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 0.0
else:
return 1.0
else:
if "Chicken" in selected_meal or "Beef" in selected_meal:
return 1.0
else:
return 0.0
def score_response(
self, inputs, llm_response: str, event: rl_chain.PickBestEvent
) -> float:
selected_meal = event.to_select_from["meal"][event.selected.index]
if "Tom" in event.based_on["user"]:
return self.score_preference(event.based_on["preference"], selected_meal)
elif "Anna" in event.based_on["user"]:
return self.score_preference(event.based_on["preference"], selected_meal)
else:
raise NotImplementedError("I don't know how to score this user")
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=CustomSelectionScorer(),
metrics_step=5,
metrics_window_size=5, # rolling window average
)
random_chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=CustomSelectionScorer(),
metrics_step=5,
metrics_window_size=5, # rolling window average
policy=rl_chain.PickBestRandomPolicy, # set the random policy instead of default
)
for _ in range(20):
try:
chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
random_chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Anna"),
preference=rl_chain.BasedOn(["Loves meat", "especially beef"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
random_chain.run(
meal= | rl_chain.ToSelectFrom(meals) | langchain_experimental.rl_chain.ToSelectFrom |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.evaluation import load_evaluator
evaluator = load_evaluator("trajectory")
import subprocess
from urllib.parse import urlparse
from langchain.agents import AgentType, initialize_agent
from langchain.tools import tool
from langchain_openai import ChatOpenAI
from pydantic import HttpUrl
@tool
def ping(url: HttpUrl, return_error: bool) -> str:
"""Ping the fully specified url. Must include https:// in the url."""
hostname = urlparse(str(url)).netloc
completed_process = subprocess.run(
["ping", "-c", "1", hostname], capture_output=True, text=True
)
output = completed_process.stdout
if return_error and completed_process.returncode != 0:
return completed_process.stderr
return output
@tool
def trace_route(url: HttpUrl, return_error: bool) -> str:
"""Trace the route to the specified url. Must include https:// in the url."""
hostname = urlparse(str(url)).netloc
completed_process = subprocess.run(
["traceroute", hostname], capture_output=True, text=True
)
output = completed_process.stdout
if return_error and completed_process.returncode != 0:
return completed_process.stderr
return output
llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
agent = initialize_agent(
llm=llm,
tools=[ping, trace_route],
agent=AgentType.OPENAI_MULTI_FUNCTIONS,
return_intermediate_steps=True, # IMPORTANT!
)
result = agent("What's the latency like for https://langchain.com?")
evaluation_result = evaluator.evaluate_agent_trajectory(
prediction=result["output"],
input=result["input"],
agent_trajectory=result["intermediate_steps"],
)
evaluation_result
get_ipython().run_line_magic('pip', 'install --upgrade --quiet anthropic')
from langchain_community.chat_models import ChatAnthropic
eval_llm = | ChatAnthropic(temperature=0) | langchain_community.chat_models.ChatAnthropic |
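Presumably this alternative LLM is then passed to the trajectory evaluator and rerun over the same agent trace, something like:
evaluator = load_evaluator("trajectory", llm=eval_llm)
evaluation_result = evaluator.evaluate_agent_trajectory(
    prediction=result["output"],
    input=result["input"],
    agent_trajectory=result["intermediate_steps"],
)
evaluation_result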
import os
os.environ["SERPER_API_KEY"] = ""
os.environ["OPENAI_API_KEY"] = ""
from typing import Any, List
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_community.utilities import GoogleSerperAPIWrapper
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_openai import ChatOpenAI, OpenAI
class SerperSearchRetriever(BaseRetriever):
search: GoogleSerperAPIWrapper = None
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any
) -> List[Document]:
return [Document(page_content=self.search.run(query))]
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
raise NotImplementedError()
retriever = SerperSearchRetriever(search= | GoogleSerperAPIWrapper() | langchain_community.utilities.GoogleSerperAPIWrapper |
from langchain_community.document_loaders import UnstructuredRSTLoader
loader = | UnstructuredRSTLoader(file_path="example_data/README.rst", mode="elements") | langchain_community.document_loaders.UnstructuredRSTLoader |
from langchain_community.document_loaders import FacebookChatLoader
loader = | FacebookChatLoader("example_data/facebook_chat.json") | langchain_community.document_loaders.FacebookChatLoader |
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain_community.chat_message_histories import RedisChatMessageHistory
from langchain_community.utilities import GoogleSearchAPIWrapper
from langchain_openai import OpenAI
search = GoogleSearchAPIWrapper()
tools = [
Tool(
name="Search",
func=search.run,
description="useful for when you need to answer questions about current events",
)
]
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"
{chat_history}
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["input", "chat_history", "agent_scratchpad"],
)
message_history = RedisChatMessageHistory(
url="redis://localhost:6379/0", ttl=600, session_id="my-session"
)
memory = ConversationBufferMemory(
memory_key="chat_history", chat_memory=message_history
)
llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
agent = ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True)
agent_chain = AgentExecutor.from_agent_and_tools(
agent=agent, tools=tools, verbose=True, memory=memory
)
agent_chain.run(input="How many people live in canada?")
agent_chain.run(input="what is their national anthem called?")
prefix = """Have a conversation with a human, answering the following questions as best you can. You have access to the following tools:"""
suffix = """Begin!"
Question: {input}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools, prefix=prefix, suffix=suffix, input_variables=["input", "agent_scratchpad"]
)
llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
agent = | ZeroShotAgent(llm_chain=llm_chain, tools=tools, verbose=True) | langchain.agents.ZeroShotAgent |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet meilisearch')
import getpass
import os
os.environ["MEILI_HTTP_ADDR"] = getpass.getpass("Meilisearch HTTP address and port:")
os.environ["MEILI_MASTER_KEY"] = getpass.getpass("Meilisearch API Key:")
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain_community.vectorstores import Meilisearch
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
embeddings = OpenAIEmbeddings()
with open("../../modules/state_of_the_union.txt") as f:
state_of_the_union = f.read()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_text(state_of_the_union)
vector_store = Meilisearch.from_texts(texts=texts, embedding=embeddings)
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
vector_store = | Meilisearch.from_documents(documents=documents, embedding=embeddings) | langchain_community.vectorstores.Meilisearch.from_documents |
import os
os.environ["GOLDEN_API_KEY"] = ""
from langchain_community.utilities.golden_query import GoldenQueryAPIWrapper
golden_query = | GoldenQueryAPIWrapper() | langchain_community.utilities.golden_query.GoldenQueryAPIWrapper |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
import os
import uuid
uid = uuid.uuid4().hex[:6]
project_name = f"Run Fine-tuning Walkthrough {uid}"
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = "YOUR API KEY"
os.environ["LANGCHAIN_PROJECT"] = project_name
from enum import Enum
from langchain_core.pydantic_v1 import BaseModel, Field
class Operation(Enum):
add = "+"
subtract = "-"
multiply = "*"
divide = "/"
class Calculator(BaseModel):
"""A calculator function"""
num1: float
num2: float
operation: Operation = Field(..., description="+,-,*,/")
def calculate(self):
if self.operation == Operation.add:
return self.num1 + self.num2
elif self.operation == Operation.subtract:
return self.num1 - self.num2
elif self.operation == Operation.multiply:
return self.num1 * self.num2
elif self.operation == Operation.divide:
if self.num2 != 0:
return self.num1 / self.num2
else:
return "Cannot divide by zero"
from pprint import pprint
from langchain.utils.openai_functions import convert_pydantic_to_openai_function
from langchain_core.pydantic_v1 import BaseModel
openai_function_def = | convert_pydantic_to_openai_function(Calculator) | langchain.utils.openai_functions.convert_pydantic_to_openai_function |
get_ipython().run_line_magic('pip', 'install -qU chromadb langchain langchain-community langchain-openai')
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
loader = TextLoader("../../state_of_the_union.txt")
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
for i, doc in enumerate(texts):
doc.metadata["page_chunk"] = i
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(texts, embeddings, collection_name="state-of-union")
retriever = vectorstore.as_retriever()
from langchain.tools.retriever import create_retriever_tool
retriever_tool = create_retriever_tool(
retriever,
"state-of-union-retriever",
"Query a retriever to get information about state of the union address",
)
from typing import List
from langchain_core.pydantic_v1 import BaseModel, Field
class Response(BaseModel):
"""Final response to the question being asked"""
answer: str = Field(description="The final answer to respond to the user")
sources: List[int] = Field(
description="List of page chunks that contain answer to the question. Only include a page chunk if it contains relevant information"
)
import json
from langchain_core.agents import AgentActionMessageLog, AgentFinish
def parse(output):
if "function_call" not in output.additional_kwargs:
return AgentFinish(return_values={"output": output.content}, log=output.content)
function_call = output.additional_kwargs["function_call"]
name = function_call["name"]
inputs = json.loads(function_call["arguments"])
if name == "Response":
return AgentFinish(return_values=inputs, log=str(function_call))
else:
return AgentActionMessageLog(
tool=name, tool_input=inputs, log="", message_log=[output]
)
from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant"),
("user", "{input}"),
| MessagesPlaceholder(variable_name="agent_scratchpad") | langchain_core.prompts.MessagesPlaceholder |
from langchain.agents import AgentType, initialize_agent
from langchain.requests import Requests
from langchain_community.agent_toolkits import NLAToolkit
from langchain_openai import OpenAI
llm = OpenAI(
temperature=0, max_tokens=700, model_name="gpt-3.5-turbo-instruct"
) # You can swap between different core LLM's here.
speak_toolkit = | NLAToolkit.from_llm_and_url(llm, "https://api.speak.com/openapi.yaml") | langchain_community.agent_toolkits.NLAToolkit.from_llm_and_url |
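A sketch of how the toolkit's tools could feed an agent (the Klarna spec URL and the shopping question are assumptions following the same pattern):
klarna_toolkit = NLAToolkit.from_llm_and_url(
    llm, "https://www.klarna.com/us/shopping/public/openai/v0/api-docs/"  # assumed second toolkit
)
natural_language_tools = speak_toolkit.get_tools() + klarna_toolkit.get_tools()
mrkl = initialize_agent(
    natural_language_tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
)
mrkl.run(
    "I have an end of year party for my Italian class and have to buy some Italian clothes for it"
)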
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sqlite-vss')
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
from langchain_community.vectorstores import SQLiteVSS
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
texts = [doc.page_content for doc in docs]
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
db = SQLiteVSS.from_texts(
texts=texts,
embedding=embedding_function,
table="state_union",
db_file="/tmp/vss.db",
)
query = "What did the president say about Ketanji Brown Jackson"
data = db.similarity_search(query)
data[0].page_content
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
from langchain_community.vectorstores import SQLiteVSS
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
texts = [doc.page_content for doc in docs]
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
connection = | SQLiteVSS.create_connection(db_file="/tmp/vss.db") | langchain_community.vectorstores.SQLiteVSS.create_connection |
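With an explicit connection, the vector store could then be constructed and queried directly (a sketch reusing the table name and embedding function from above):
db = SQLiteVSS(
    table="state_union",
    embedding=embedding_function,
    connection=connection,
)
db.add_texts(["Ketanji Brown Jackson is awesome"])  # example extra text
data = db.similarity_search("What did the president say about Ketanji Brown Jackson")
data[0].page_content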
get_ipython().run_line_magic('pip', 'install --upgrade --quiet tiktoken langchain-openai python-dotenv datasets langchain deeplake beautifulsoup4 html2text ragas')
ORG_ID = "..."
import getpass
import os
from langchain.chains import RetrievalQA
from langchain.vectorstores.deeplake import DeepLake
from langchain_openai import OpenAIChat, OpenAIEmbeddings
os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API token: ")
os.environ["ACTIVELOOP_TOKEN"] = getpass.getpass(
"Enter your ActiveLoop API token: "
) # Get your API token from https://app.activeloop.ai, click on your profile picture in the top right corner, and select "API Tokens"
token = os.getenv("ACTIVELOOP_TOKEN")
openai_embeddings = OpenAIEmbeddings()
db = DeepLake(
dataset_path=f"hub://{ORG_ID}/deeplake-docs-deepmemory", # org_id stands for your username or organization from activeloop
embedding=openai_embeddings,
runtime={"tensor_db": True},
token=token,
read_only=False,
)
from urllib.parse import urljoin
import requests
from bs4 import BeautifulSoup
def get_all_links(url):
response = requests.get(url)
if response.status_code != 200:
print(f"Failed to retrieve the page: {url}")
return []
soup = BeautifulSoup(response.content, "html.parser")
links = [
urljoin(url, a["href"]) for a in soup.find_all("a", href=True) if a["href"]
]
return links
base_url = "https://docs.deeplake.ai/en/latest/"
all_links = get_all_links(base_url)
from langchain.document_loaders import AsyncHtmlLoader
loader = AsyncHtmlLoader(all_links)
docs = loader.load()
from langchain.document_transformers import Html2TextTransformer
html2text = Html2TextTransformer()
docs_transformed = html2text.transform_documents(docs)
from langchain_text_splitters import RecursiveCharacterTextSplitter
chunk_size = 4096
docs_new = []
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size,
)
for doc in docs_transformed:
if len(doc.page_content) < chunk_size:
docs_new.append(doc)
else:
docs = text_splitter.create_documents([doc.page_content])
docs_new.extend(docs)
docs = db.add_documents(docs_new)
from typing import List
from langchain.chains.openai_functions import (
create_structured_output_chain,
)
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
docs = db.vectorstore.dataset.text.data(fetch_chunks=True, aslist=True)["value"]
ids = db.vectorstore.dataset.id.data(fetch_chunks=True, aslist=True)["value"]
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
class Questions(BaseModel):
"""Identifying information about a person."""
question: str = Field(..., description="Questions about text")
prompt_msgs = [
SystemMessage(
content="You are a world class expert for generating questions based on provided context. \
You make sure the question can be answered by the text."
),
HumanMessagePromptTemplate.from_template(
"Use the given text to generate a question from the following input: {input}"
),
HumanMessage(content="Tips: Make sure to answer in the correct format"),
]
prompt = ChatPromptTemplate(messages=prompt_msgs)
chain = create_structured_output_chain(Questions, llm, prompt, verbose=True)
text = "# Understanding Hallucinations and Bias ## **Introduction** In this lesson, we'll cover the concept of **hallucinations** in LLMs, highlighting their influence on AI applications and demonstrating how to mitigate them using techniques like the retriever's architectures. We'll also explore **bias** within LLMs with examples."
questions = chain.run(input=text)
print(questions)
import random
from langchain_openai import OpenAIEmbeddings
from tqdm import tqdm
def generate_queries(docs: List[str], ids: List[str], n: int = 100):
questions = []
relevances = []
pbar = tqdm(total=n)
while len(questions) < n:
r = random.randint(0, len(docs) - 1)
text, label = docs[r], ids[r]
generated_qs = [chain.run(input=text).question]
questions.extend(generated_qs)
relevances.extend([[(label, 1)] for _ in generated_qs])
pbar.update(len(generated_qs))
if len(questions) % 10 == 0:
print(f"q: {len(questions)}")
return questions[:n], relevances[:n]
chain = | create_structured_output_chain(Questions, llm, prompt, verbose=False) | langchain.chains.openai_functions.create_structured_output_chain |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai argilla')
import os
os.environ["ARGILLA_API_URL"] = "..."
os.environ["ARGILLA_API_KEY"] = "..."
os.environ["OPENAI_API_KEY"] = "..."
import argilla as rg
from packaging.version import parse as parse_version
if parse_version(rg.__version__) < parse_version("1.8.0"):
raise RuntimeError(
"`FeedbackDataset` is only available in Argilla v1.8.0 or higher, please "
"upgrade `argilla` as `pip install argilla --upgrade`."
)
dataset = rg.FeedbackDataset(
fields=[
rg.TextField(name="prompt"),
rg.TextField(name="response"),
],
questions=[
rg.RatingQuestion(
name="response-rating",
description="How would you rate the quality of the response?",
values=[1, 2, 3, 4, 5],
required=True,
),
rg.TextQuestion(
name="response-feedback",
description="What feedback do you have for the response?",
required=False,
),
],
guidelines="You're asked to rate the quality of the response and provide feedback.",
)
rg.init(
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
dataset.push_to_argilla("langchain-dataset")
from langchain.callbacks import ArgillaCallbackHandler
argilla_callback = ArgillaCallbackHandler(
dataset_name="langchain-dataset",
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler
from langchain_openai import OpenAI
argilla_callback = ArgillaCallbackHandler(
dataset_name="langchain-dataset",
api_url=os.environ["ARGILLA_API_URL"],
api_key=os.environ["ARGILLA_API_KEY"],
)
callbacks = [ | StdOutCallbackHandler() | langchain.callbacks.StdOutCallbackHandler |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet sagemaker')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-openai')
get_ipython().run_line_magic('pip', 'install --upgrade --quiet google-search-results')
import os
os.environ["OPENAI_API_KEY"] = "<ADD-KEY-HERE>"
os.environ["SERPAPI_API_KEY"] = "<ADD-KEY-HERE>"
from langchain.agents import initialize_agent, load_tools
from langchain.callbacks import SageMakerCallbackHandler
from langchain.chains import LLMChain, SimpleSequentialChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
from sagemaker.analytics import ExperimentAnalytics
from sagemaker.experiments.run import Run
from sagemaker.session import Session
HPARAMS = {
"temperature": 0.1,
"model_name": "gpt-3.5-turbo-instruct",
}
BUCKET_NAME = None
EXPERIMENT_NAME = "langchain-sagemaker-tracker"
session = Session(default_bucket=BUCKET_NAME)
RUN_NAME = "run-scenario-1"
PROMPT_TEMPLATE = "tell me a joke about {topic}"
INPUT_VARIABLES = {"topic": "fish"}
with Run(
experiment_name=EXPERIMENT_NAME, run_name=RUN_NAME, sagemaker_session=session
) as run:
sagemaker_callback = | SageMakerCallbackHandler(run) | langchain.callbacks.SageMakerCallbackHandler |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet html2text')
from langchain_community.document_loaders import AsyncHtmlLoader
urls = ["https://www.espn.com", "https://lilianweng.github.io/posts/2023-06-23-agent/"]
loader = | AsyncHtmlLoader(urls) | langchain_community.document_loaders.AsyncHtmlLoader |
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../state_of_the_union.txt", encoding="utf-8")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
for i, text in enumerate(texts):
text.metadata["source"] = f"{i}-pl"
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(texts, embeddings)
from langchain.chains import create_qa_with_sources_chain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
qa_chain = create_qa_with_sources_chain(llm)
doc_prompt = PromptTemplate(
template="Content: {page_content}\nSource: {source}",
input_variables=["page_content", "source"],
)
final_qa_chain = StuffDocumentsChain(
llm_chain=qa_chain,
document_variable_name="context",
document_prompt=doc_prompt,
)
retrieval_qa = RetrievalQA(
retriever=docsearch.as_retriever(), combine_documents_chain=final_qa_chain
)
query = "What did the president say about russia"
retrieval_qa.run(query)
qa_chain_pydantic = | create_qa_with_sources_chain(llm, output_parser="pydantic") | langchain.chains.create_qa_with_sources_chain |
from langchain_community.document_loaders import HuggingFaceDatasetLoader
dataset_name = "imdb"
page_content_column = "text"
loader = HuggingFaceDatasetLoader(dataset_name, page_content_column)
data = loader.load()
data[:15]
from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders.hugging_face_dataset import (
HuggingFaceDatasetLoader,
)
dataset_name = "tweet_eval"
page_content_column = "text"
name = "stance_climate"
loader = HuggingFaceDatasetLoader(dataset_name, page_content_column, name)
index = | VectorstoreIndexCreator() | langchain.indexes.VectorstoreIndexCreator |
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain langchain-openai')
from langchain.chains import OpenAIModerationChain
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import OpenAI
moderate = OpenAIModerationChain()
model = OpenAI()
prompt = | ChatPromptTemplate.from_messages([("system", "repeat after me: {input}")]) | langchain_core.prompts.ChatPromptTemplate.from_messages |
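The moderation chain would presumably be appended to the end of the runnable sequence so flagged outputs are caught, e.g.:
chain = prompt | model | moderate
chain.invoke({"input": "you are stupid"})  # example input that should trip moderation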
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain fleet-context langchain-openai pandas faiss-cpu # faiss-gpu for CUDA supported GPU')
from operator import itemgetter
from typing import Any, Optional, Type
import pandas as pd
from langchain.retrievers import MultiVectorRetriever
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_core.stores import BaseStore
from langchain_core.vectorstores import VectorStore
from langchain_openai import OpenAIEmbeddings
def load_fleet_retriever(
df: pd.DataFrame,
*,
vectorstore_cls: Type[VectorStore] = FAISS,
docstore: Optional[BaseStore] = None,
**kwargs: Any,
):
vectorstore = _populate_vectorstore(df, vectorstore_cls)
if docstore is None:
return vectorstore.as_retriever(**kwargs)
else:
_populate_docstore(df, docstore)
return MultiVectorRetriever(
vectorstore=vectorstore, docstore=docstore, id_key="parent", **kwargs
)
def _populate_vectorstore(
df: pd.DataFrame,
vectorstore_cls: Type[VectorStore],
) -> VectorStore:
if not hasattr(vectorstore_cls, "from_embeddings"):
raise ValueError(
f"Incompatible vector store class {vectorstore_cls}."
"Must implement `from_embeddings` class method."
)
texts_embeddings = []
metadatas = []
for _, row in df.iterrows():
texts_embeddings.append((row.metadata["text"], row["dense_embeddings"]))
metadatas.append(row.metadata)
return vectorstore_cls.from_embeddings(
texts_embeddings,
OpenAIEmbeddings(model="text-embedding-ada-002"),
metadatas=metadatas,
)
def _populate_docstore(df: pd.DataFrame, docstore: BaseStore) -> None:
parent_docs = []
df = df.copy()
df["parent"] = df.metadata.apply(itemgetter("parent"))
for parent_id, group in df.groupby("parent"):
sorted_group = group.iloc[
group.metadata.apply(itemgetter("section_index")).argsort()
]
text = "".join(sorted_group.metadata.apply(itemgetter("text")))
metadata = {
k: sorted_group.iloc[0].metadata[k] for k in ("title", "type", "url")
}
text = metadata["title"] + "\n" + text
metadata["id"] = parent_id
parent_docs.append(Document(page_content=text, metadata=metadata))
docstore.mset(((d.metadata["id"], d) for d in parent_docs))
from context import download_embeddings
df = download_embeddings("langchain")
vecstore_retriever = load_fleet_retriever(df)
vecstore_retriever.get_relevant_documents("How does the multi vector retriever work")
from langchain.storage import InMemoryStore
parent_retriever = load_fleet_retriever(
"https://www.dropbox.com/scl/fi/4rescpkrg9970s3huz47l/libraries_langchain_release.parquet?rlkey=283knw4wamezfwiidgpgptkep&dl=1",
docstore= | InMemoryStore() | langchain.storage.InMemoryStore |
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain_community.utilities import GoogleSearchAPIWrapper
from langchain_openai import OpenAI
search = | GoogleSearchAPIWrapper() | langchain_community.utilities.GoogleSearchAPIWrapper |
meals = [
"Beef Enchiladas with Feta cheese. Mexican-Greek fusion",
"Chicken Flatbreads with red sauce. Italian-Mexican fusion",
"Veggie sweet potato quesadillas with vegan cheese",
"One-Pan Tortelonni bake with peppers and onions",
]
from langchain_openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-instruct")
from langchain.prompts import PromptTemplate
PROMPT_TEMPLATE = """Here is the description of a meal: "{meal}".
Embed the meal into the given text: "{text_to_personalize}".
Prepend a personalized message including the user's name "{user}"
and their preference "{preference}".
Make it sound good.
"""
PROMPT = PromptTemplate(
input_variables=["meal", "text_to_personalize", "user", "preference"],
template=PROMPT_TEMPLATE,
)
import langchain_experimental.rl_chain as rl_chain
chain = rl_chain.PickBest.from_llm(llm=llm, prompt=PROMPT)
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs \
believe you will love it!",
)
print(response["response"])
for _ in range(5):
try:
response = chain.run(
meal=rl_chain.ToSelectFrom(meals),
user=rl_chain.BasedOn("Tom"),
preference=rl_chain.BasedOn(["Vegetarian", "regular dairy is ok"]),
text_to_personalize="This is the weeks specialty dish, our master chefs believe you will love it!",
)
except Exception as e:
print(e)
print(response["response"])
print()
scoring_criteria_template = (
"Given {preference} rank how good or bad this selection is {meal}"
)
chain = rl_chain.PickBest.from_llm(
llm=llm,
prompt=PROMPT,
selection_scorer=rl_chain.AutoSelectionScorer(
llm=llm, scoring_criteria_template_str=scoring_criteria_template
),
)
response = chain.run(
meal= | rl_chain.ToSelectFrom(meals) | langchain_experimental.rl_chain.ToSelectFrom |
from langchain.chains import HypotheticalDocumentEmbedder, LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI, OpenAIEmbeddings
base_embeddings = OpenAIEmbeddings()
llm = OpenAI()
embeddings = HypotheticalDocumentEmbedder.from_llm(llm, base_embeddings, "web_search")
result = embeddings.embed_query("Where is the Taj Mahal?")
multi_llm = OpenAI(n=4, best_of=4)
embeddings = HypotheticalDocumentEmbedder.from_llm(
multi_llm, base_embeddings, "web_search"
)
result = embeddings.embed_query("Where is the Taj Mahal?")
prompt_template = """Please answer the user's question about the most recent state of the union address
Question: {question}
Answer:"""
prompt = PromptTemplate(input_variables=["question"], template=prompt_template)
llm_chain = | LLMChain(llm=llm, prompt=prompt) | langchain.chains.LLMChain |
import requests
def download_drive_file(url: str, output_path: str = "chat.db") -> None:
file_id = url.split("/")[-2]
download_url = f"https://drive.google.com/uc?export=download&id={file_id}"
response = requests.get(download_url)
if response.status_code != 200:
print("Failed to download the file.")
return
with open(output_path, "wb") as file:
file.write(response.content)
print(f"File {output_path} downloaded.")
url = (
"https://drive.google.com/file/d/1NebNKqTA2NXApCmeH6mu0unJD2tANZzo/view?usp=sharing"
)
download_drive_file(url)
from langchain_community.chat_loaders.imessage import IMessageChatLoader
loader = IMessageChatLoader(
path="./chat.db",
)
from typing import List
from langchain_community.chat_loaders.base import ChatSession
from langchain_community.chat_loaders.utils import (
map_ai_messages,
merge_chat_runs,
)
raw_messages = loader.lazy_load()
merged_messages = merge_chat_runs(raw_messages)
chat_sessions: List[ChatSession] = list(
map_ai_messages(merged_messages, sender="Tortoise")
)
chat_sessions[0]["messages"][:3]
from langchain.adapters.openai import convert_messages_for_finetuning
training_data = | convert_messages_for_finetuning(chat_sessions) | langchain.adapters.openai.convert_messages_for_finetuning |
import asyncio
from langchain.callbacks import get_openai_callback
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
with get_openai_callback() as cb:
llm("What is the square root of 4?")
total_tokens = cb.total_tokens
assert total_tokens > 0
with get_openai_callback() as cb:
llm("What is the square root of 4?")
llm("What is the square root of 4?")
assert cb.total_tokens == total_tokens * 2
with get_openai_callback() as cb:
await asyncio.gather(
*[llm.agenerate(["What is the square root of 4?"]) for _ in range(3)]
)
assert cb.total_tokens == total_tokens * 3
task = asyncio.create_task(llm.agenerate(["What is the square root of 4?"]))
with | get_openai_callback() | langchain.callbacks.get_openai_callback |
from langchain.output_parsers import XMLOutputParser
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatAnthropic
model = ChatAnthropic(model="claude-2", max_tokens_to_sample=512, temperature=0.1)
actor_query = "Generate the shortened filmography for Tom Hanks."
output = model.invoke(
f"""{actor_query}
Please enclose the movies in <movie></movie> tags"""
)
print(output.content)
parser = | XMLOutputParser() | langchain.output_parsers.XMLOutputParser |
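A plausible continuation is to feed the parser's format instructions into a prompt and chain it with the model (a sketch; the query reuses actor_query from above):
prompt = PromptTemplate(
    template="""{query}\n{format_instructions}""",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
chain = prompt | model | parser
output = chain.invoke({"query": actor_query})
print(output)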
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_openai import ChatOpenAI
examples = [
{
"input": "Could the members of The Police perform lawful arrests?",
"output": "what can the members of The Police do?",
},
{
"input": "Jan Sindel’s was born in what country?",
"output": "what is Jan Sindel’s personal history?",
},
]
example_prompt = ChatPromptTemplate.from_messages(
[
("human", "{input}"),
("ai", "{output}"),
]
)
few_shot_prompt = FewShotChatMessagePromptTemplate(
example_prompt=example_prompt,
examples=examples,
)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"""You are an expert at world knowledge. Your task is to step back and paraphrase a question to a more generic step-back question, which is easier to answer. Here are a few examples:""",
),
few_shot_prompt,
("user", "{question}"),
]
)
question_gen = prompt | ChatOpenAI(temperature=0) | StrOutputParser()
question = "was chatgpt around while trump was president?"
question_gen.invoke({"question": question})
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
search = DuckDuckGoSearchAPIWrapper(max_results=4)
def retriever(query):
return search.run(query)
retriever(question)
retriever(question_gen.invoke({"question": question}))
from langchain import hub
response_prompt = | hub.pull("langchain-ai/stepback-answer") | langchain.hub.pull |
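The final step-back chain would then combine both retrieval passes with the pulled response prompt, roughly as follows (a sketch following the step-back prompting recipe; the exact wiring is an assumption):
chain = (
    {
        "normal_context": RunnableLambda(lambda x: x["question"]) | retriever,
        "step_back_context": question_gen | retriever,
        "question": lambda x: x["question"],
    }
    | response_prompt
    | ChatOpenAI(temperature=0)
    | StrOutputParser()
)
chain.invoke({"question": question})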
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-nvidia-ai-endpoints')
import getpass
import os
if not os.environ.get("NVIDIA_API_KEY", "").startswith("nvapi-"):
nvapi_key = getpass.getpass("Enter your NVIDIA API key: ")
assert nvapi_key.startswith("nvapi-"), f"{nvapi_key[:5]}... is not a valid key"
os.environ["NVIDIA_API_KEY"] = nvapi_key
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm = ChatNVIDIA(model="mixtral_8x7b")
result = llm.invoke("Write a ballad about LangChain.")
print(result.content)
print(llm.batch(["What's 2*3?", "What's 2*6?"]))
for chunk in llm.stream("How far can a seagull fly in one day?"):
print(chunk.content, end="|")
async for chunk in llm.astream(
"How long does it take for monarch butterflies to migrate?"
):
print(chunk.content, end="|")
ChatNVIDIA.get_available_models()
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_nvidia_ai_endpoints import ChatNVIDIA
prompt = ChatPromptTemplate.from_messages(
[("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")]
)
chain = prompt | ChatNVIDIA(model="llama2_13b") | StrOutputParser()
for txt in chain.stream({"input": "What's your name?"}):
print(txt, end="")
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are an expert coding AI. Respond only in valid python; no narration whatsoever.",
),
("user", "{input}"),
]
)
chain = prompt | ChatNVIDIA(model="llama2_code_70b") | StrOutputParser()
for txt in chain.stream({"input": "How do I solve this fizz buzz problem?"}):
print(txt, end="")
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm = ChatNVIDIA(model="nemotron_steerlm_8b")
complex_result = llm.invoke(
"What's a PB&J?", labels={"creativity": 0, "complexity": 3, "verbosity": 0}
)
print("Un-creative\n")
print(complex_result.content)
print("\n\nCreative\n")
creative_result = llm.invoke(
"What's a PB&J?", labels={"creativity": 9, "complexity": 3, "verbosity": 9}
)
print(creative_result.content)
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_nvidia_ai_endpoints import ChatNVIDIA
prompt = ChatPromptTemplate.from_messages(
    [("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")]
)
from getpass import getpass
DEEPINFRA_API_TOKEN = getpass()
import os
os.environ["DEEPINFRA_API_TOKEN"] = DEEPINFRA_API_TOKEN
from langchain_community.chat_models import ChatDeepInfra
from langchain_core.messages import HumanMessage
chat = ChatDeepInfra(model="meta-llama/Llama-2-7b-chat-hf")
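# Hedged usage sketch: send a single human message through the DeepInfra chat model.
messages = [HumanMessage(content="Explain deep learning in one sentence.")]
chat.invoke(messages)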
get_ipython().run_line_magic('pip', 'install --upgrade --quiet vald-client-python')
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Vald
from langchain_text_splitters import CharacterTextSplitter
raw_documents = TextLoader("state_of_the_union.txt").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(raw_documents)
embeddings = HuggingFaceEmbeddings()
db = Vald.from_documents(documents, embeddings, host="localhost", port=8080)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
docs[0].page_content
embedding_vector = embeddings.embed_query(query)
docs = db.similarity_search_by_vector(embedding_vector)
docs[0].page_content
docs_and_scores = db.similarity_search_with_score(query)
docs_and_scores[0]
retriever = db.as_retriever(search_type="mmr")
retriever.get_relevant_documents(query)
db.max_marginal_relevance_search(query, k=2, fetch_k=10)
import grpc
with open("test_root_cacert.crt", "rb") as root:
credentials = grpc.ssl_channel_credentials(root_certificates=root.read())
with open(".ztoken", "rb") as ztoken:
token = ztoken.read().strip()
metadata = [(b"athenz-role-auth", token)]
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Vald
from langchain_text_splitters import CharacterTextSplitter
raw_documents = TextLoader("state_of_the_union.txt").load()
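# Hedged sketch of connecting to a TLS/Athenz-secured Vald cluster using the
# credentials and metadata prepared above; the grpc_* keyword names are assumptions
# modelled on the insecure example earlier.
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(raw_documents)
embeddings = HuggingFaceEmbeddings()
db = Vald.from_documents(
    documents,
    embeddings,
    host="localhost",
    port=8080,
    grpc_use_secure=True,
    grpc_credentials=credentials,
    grpc_metadata=metadata,
)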
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
history = StreamlitChatMessageHistory(key="chat_messages")
history.add_user_message("hi!")
history.add_ai_message("whats up?")
history.messages
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
msgs = StreamlitChatMessageHistory(key="special_app_key")
if len(msgs.messages) == 0:
msgs.add_ai_message("How can I help you?")
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are an AI chatbot having a conversation with a human."),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{question}"),  # user turn (assumed input key)
    ]
)
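# Hedged sketch: wrap the chain with the Streamlit-backed message history so each turn
# is persisted per session; the "question"/"history" keys match the prompt above.
chain = prompt | ChatOpenAI()
chain_with_history = RunnableWithMessageHistory(
    chain,
    lambda session_id: msgs,
    input_messages_key="question",
    history_messages_key="history",
)
response = chain_with_history.invoke(
    {"question": "What is the capital of France?"},
    config={"configurable": {"session_id": "any"}},
)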
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_nvidia_ai_endpoints import ChatNVIDIA
prompt = ChatPromptTemplate.from_messages(
[("system", "You are a helpful AI assistant named Fred."), ("user", "{input}")]
)
chain = (
prompt
| ChatNVIDIA(model="nemotron_steerlm_8b").bind(
labels={"creativity": 9, "complexity": 0, "verbosity": 9}
)
| StrOutputParser()
)
for txt in chain.stream({"input": "Why is a PB&J?"}):
print(txt, end="")
import IPython
import requests
image_url = "https://www.nvidia.com/content/dam/en-zz/Solutions/research/ai-playground/nvidia-picasso-3c33-p@2x.jpg" ## Large Image
image_content = requests.get(image_url).content
IPython.display.Image(image_content)
from langchain_nvidia_ai_endpoints import ChatNVIDIA
llm = ChatNVIDIA(model="playground_neva_22b")
from langchain_core.messages import HumanMessage
llm.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "Describe this image:"},
{"type": "image_url", "image_url": {"url": image_url}},
]
)
]
)
from langchain_core.messages import HumanMessage
llm.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "Describe this image:"},
{"type": "image_url", "image_url": {"url": image_url}},
]
)
],
labels={"creativity": 0, "quality": 9, "complexity": 0, "verbosity": 0},
)
import IPython
import requests
image_url = "https://picsum.photos/seed/kitten/300/200"
image_content = requests.get(image_url).content
IPython.display.Image(image_content)
import base64
from langchain_core.messages import HumanMessage
b64_string = base64.b64encode(image_content).decode("utf-8")
llm.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "Describe this image:"},
{
"type": "image_url",
"image_url": {"url": f"data:image/png;base64,{b64_string}"},
},
]
)
]
)
base64_with_mime_type = f"data:image/png;base64,{b64_string}"
llm.invoke(f'What\'s in this image?\n<img src="{base64_with_mime_type}" />')
from langchain_nvidia_ai_endpoints import ChatNVIDIA
kosmos = ChatNVIDIA(model="kosmos_2")
from langchain_core.messages import HumanMessage
def drop_streaming_key(d):
"""Takes in payload dictionary, outputs new payload dictionary"""
if "stream" in d:
d.pop("stream")
return d
kosmos = ChatNVIDIA(model="kosmos_2")
kosmos.client.payload_fn = drop_streaming_key
kosmos.invoke(
[
HumanMessage(
content=[
{"type": "text", "text": "Describe this image:"},
{"type": "image_url", "image_url": {"url": image_url}},
]
)
]
)
import base64
from io import BytesIO
from PIL import Image
img_gen = ChatNVIDIA(model="sdxl_turbo")
get_ipython().run_line_magic('pip', 'install --upgrade --quiet modal')
get_ipython().system('modal token new')
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_community.llms import Modal
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate.from_template(template)
endpoint_url = "https://ecorp--custom-llm-endpoint.modal.run" # REPLACE ME with your deployed Modal web endpoint's URL
llm = Modal(endpoint_url=endpoint_url)
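# Hedged usage sketch: run the prompt template against the deployed Modal endpoint.
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What NFL team won the Super Bowl in the year Justin Bieber was born?"
llm_chain.run(question)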
from langchain_experimental.llm_bash.base import LLMBashChain
from langchain_openai import OpenAI
llm = OpenAI(temperature=0)
text = "Please write a bash script that prints 'Hello World' to the console."
bash_chain = LLMBashChain.from_llm(llm, verbose=True)
bash_chain.run(text)
from langchain.prompts.prompt import PromptTemplate
from langchain_experimental.llm_bash.prompt import BashOutputParser
_PROMPT_TEMPLATE = """If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put "#!/bin/bash" in your answer. Make sure to reason step by step, using this format:
Question: "copy the files in the directory named 'target' into a new directory at the same level as target called 'myNewDirectory'"
I need to take the following actions:
- List all files in the directory
- Create a new directory
- Copy the files from the first directory into the second directory
```bash
ls
mkdir myNewDirectory
cp -r target/* myNewDirectory
```
Do not use 'echo' when writing the script.
That is the format. Begin!
Question: {question}"""
PROMPT = PromptTemplate(
input_variables=["question"],
template=_PROMPT_TEMPLATE,
output_parser=BashOutputParser(),
)
bash_chain = LLMBashChain.from_llm(llm, prompt=PROMPT, verbose=True)
text = "Please write a bash script that prints 'Hello World' to the console."
bash_chain.run(text)
from langchain_experimental.llm_bash.bash import BashProcess
persistent_process = BashProcess(persistent=True)
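# Hedged sketch: reuse the persistent shell so state (e.g. the working directory)
# carries over between calls; the bash_process keyword is an assumption.
bash_chain = LLMBashChain.from_llm(llm, bash_process=persistent_process, verbose=True)
bash_chain.run("List the current directory then move up a level.")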
SOURCE = "test" # @param {type:"Query"|"CollectionGroup"|"DocumentReference"|"string"}
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-firestore')
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
from google.colab import auth
auth.authenticate_user()
get_ipython().system('gcloud services enable firestore.googleapis.com')
from langchain_core.documents.base import Document
from langchain_google_firestore import FirestoreSaver
saver = FirestoreSaver()
data = [Document(page_content="Hello, World!")]
saver.upsert_documents(data)
saver = FirestoreSaver("Collection")
saver.upsert_documents(data)
doc_ids = ["AnotherCollection/doc_id", "foo/bar"]
saver = FirestoreSaver()
saver.upsert_documents(documents=data, document_ids=doc_ids)
from langchain_google_firestore import FirestoreLoader
loader_collection = FirestoreLoader("Collection")
loader_subcollection = FirestoreLoader("Collection/doc/SubCollection")
data_collection = loader_collection.load()
data_subcollection = loader_subcollection.load()
from google.cloud import firestore
client = firestore.Client()
doc_ref = client.collection("foo").document("bar")
loader_document = FirestoreLoader(doc_ref)
data = loader_document.load()
from google.cloud.firestore import CollectionGroup, FieldFilter, Query
col_ref = client.collection("col_group")
collection_group = CollectionGroup(col_ref)
loader_group = FirestoreLoader(collection_group)
col_ref = client.collection("collection")
query = col_ref.where(filter=FieldFilter("region", "==", "west_coast"))
loader_query = FirestoreLoader(query)
saver = FirestoreSaver()
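# Hedged sketch: clean up the documents written above; delete_documents is assumed to
# mirror upsert_documents in the langchain-google-firestore saver API.
saver.delete_documents(data)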
from langchain_community.chat_models.llama_edge import LlamaEdgeChatService
from langchain_core.messages import HumanMessage, SystemMessage
service_url = "https://b008-54-186-154-209.ngrok-free.app"
chat = LlamaEdgeChatService(service_url=service_url)
system_message = SystemMessage(content="You are an AI assistant")
user_message = HumanMessage(content="What is the capital of France?")
messages = [system_message, user_message]
response = chat(messages)
print(f"[Bot] {response.content}")
service_url = "https://b008-54-186-154-209.ngrok-free.app"
chat = LlamaEdgeChatService(service_url=service_url, streaming=True)
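# Hedged streaming sketch: reuse the message pattern from above and print chunks as
# they arrive from the LlamaEdge service.
messages = [
    SystemMessage(content="You are an AI assistant"),
    HumanMessage(content="What is the capital of Norway?"),
]
for chunk in chat.stream(messages):
    print(chunk.content, end="")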
import getpass
import os
os.environ["TAVILY_API_KEY"] = getpass.getpass()
from langchain_community.tools.tavily_search import TavilySearchResults
tool = TavilySearchResults()
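# Hedged usage sketch: invoke the search tool directly with a query dict.
tool.invoke({"query": "What happened at the last Burning Man?"})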
from langchain_openai import OpenAIEmbeddings
from langchain_pinecone import PineconeVectorStore
all_documents = {
"doc1": "Climate change and economic impact.",
"doc2": "Public health concerns due to climate change.",
"doc3": "Climate change: A social perspective.",
"doc4": "Technological solutions to climate change.",
"doc5": "Policy changes needed to combat climate change.",
"doc6": "Climate change and its impact on biodiversity.",
"doc7": "Climate change: The science and models.",
"doc8": "Global warming: A subset of climate change.",
"doc9": "How climate change affects daily weather.",
"doc10": "The history of climate change activism.",
}
vectorstore = PineconeVectorStore.from_texts(
list(all_documents.values()), OpenAIEmbeddings(), index_name="rag-fusion"
)
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
from langchain import hub
prompt = hub.pull("langchain-ai/rag-fusion-query-generation")
generate_queries = (
prompt | ChatOpenAI(temperature=0) | StrOutputParser() | (lambda x: x.split("\n"))
)
original_query = "impact of climate change"
vectorstore = PineconeVectorStore.from_existing_index("rag-fusion", OpenAIEmbeddings())
retriever = vectorstore.as_retriever()
from langchain.load import dumps, loads
def reciprocal_rank_fusion(results: list[list], k=60):
fused_scores = {}
for docs in results:
for rank, doc in enumerate(docs):
doc_str = dumps(doc)
if doc_str not in fused_scores:
fused_scores[doc_str] = 0
previous_score = fused_scores[doc_str]
fused_scores[doc_str] += 1 / (rank + k)
    reranked_results = [
        (loads(doc), score)
        for doc, score in sorted(fused_scores.items(), key=lambda x: x[1], reverse=True)
    ]
    return reranked_results
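# Hedged sketch of the full RAG-fusion pipeline: generate query variants, retrieve for
# each, then fuse the ranked lists. The "original_query" input key is an assumption
# about the pulled query-generation prompt.
chain = generate_queries | retriever.map() | reciprocal_rank_fusion
chain.invoke({"original_query": original_query})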
from langchain.chains import ConversationChain
from langchain.memory import (
CombinedMemory,
ConversationBufferMemory,
ConversationSummaryMemory,
)
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAI
conv_memory = ConversationBufferMemory(
memory_key="chat_history_lines", input_key="input"
)
summary_memory = ConversationSummaryMemory(llm=OpenAI(), input_key="input")
memory = CombinedMemory(memories=[conv_memory, summary_memory])
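# Hedged sketch: a prompt that exposes both memory keys ("history" from the summary
# memory, "chat_history_lines" from the buffer memory), fed into a ConversationChain.
_TEMPLATE = """The following is a friendly conversation between a human and an AI.

Summary of conversation:
{history}
Current conversation:
{chat_history_lines}
Human: {input}
AI:"""
PROMPT = PromptTemplate(
    input_variables=["history", "input", "chat_history_lines"], template=_TEMPLATE
)
llm = OpenAI(temperature=0)
conversation = ConversationChain(llm=llm, verbose=True, memory=memory, prompt=PROMPT)
conversation.run("Hi there!")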
import nest_asyncio
from langchain.chains.graph_qa import GremlinQAChain
from langchain.schema import Document
from langchain_community.graphs import GremlinGraph
from langchain_community.graphs.graph_document import GraphDocument, Node, Relationship
from langchain_openai import AzureChatOpenAI
cosmosdb_name = "mycosmosdb"
cosmosdb_db_id = "graphtesting"
cosmosdb_db_graph_id = "mygraph"
cosmosdb_access_Key = "longstring=="
graph = GremlinGraph(
url=f"=wss://{cosmosdb_name}.gremlin.cosmos.azure.com:443/",
username=f"/dbs/{cosmosdb_db_id}/colls/{cosmosdb_db_graph_id}",
password=cosmosdb_access_Key,
)
source_doc = Document(
page_content="Matrix is a movie where Keanu Reeves, Laurence Fishburne and Carrie-Anne Moss acted."
)
movie = Node(id="The Matrix", properties={"label": "movie", "title": "The Matrix"})
actor1 = Node(id="Keanu Reeves", properties={"label": "actor", "name": "Keanu Reeves"})
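# Hedged sketch of the rest of the graph document: the remaining actors, "ActedIn"
# edges, and loading everything into the Gremlin graph; field names follow
# langchain_community.graphs.graph_document.
actor2 = Node(
    id="Laurence Fishburne", properties={"label": "actor", "name": "Laurence Fishburne"}
)
actor3 = Node(
    id="Carrie-Anne Moss", properties={"label": "actor", "name": "Carrie-Anne Moss"}
)
relationships = [
    Relationship(source=actor, target=movie, type="ActedIn")
    for actor in (actor1, actor2, actor3)
]
graph_doc = GraphDocument(
    nodes=[movie, actor1, actor2, actor3],
    relationships=relationships,
    source=source_doc,
)
graph.add_graph_documents([graph_doc])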
get_ipython().run_line_magic('pip', 'install --upgrade --quiet pymysql')
get_ipython().system('pip install sqlalchemy')
get_ipython().system('pip install langchain')
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import (
DirectoryLoader,
UnstructuredMarkdownLoader,
)
from langchain_community.vectorstores.apache_doris import (
ApacheDoris,
ApacheDorisSettings,
)
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_text_splitters import TokenTextSplitter
update_vectordb = False
loader = DirectoryLoader(
"./docs", glob="**/*.md", loader_cls=UnstructuredMarkdownLoader
)
documents = loader.load()
text_splitter = TokenTextSplitter(chunk_size=400, chunk_overlap=50)
split_docs = text_splitter.split_documents(documents)
update_vectordb = True
def gen_apache_doris(update_vectordb, embeddings, settings):
if update_vectordb:
        docsearch = ApacheDoris.from_documents(split_docs, embeddings, config=settings)
    else:
        docsearch = ApacheDoris(embeddings, settings)  # reuse the existing table
    return docsearch
from langchain_community.document_loaders.csv_loader import CSVLoader
loader = CSVLoader(
file_path="../../document_loaders/examples/example_data/mlb_teams_2012.csv"
)
data = loader.load()
import json
from typing import List
from langchain.docstore.document import Document
def write_json(path: str, documents: List[Document]) -> None:
results = [{"text": doc.page_content} for doc in documents]
with open(path, "w") as f:
json.dump(results, f, indent=2)
write_json("foo.json", data)
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
from langchain.retrievers import ChatGPTPluginRetriever
retriever = ChatGPTPluginRetriever(url="http://0.0.0.0:8000", bearer_token="foo")
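# Hedged usage sketch: query the retrieval plugin that was fed the CSV rows above.
retriever.get_relevant_documents("alice's phone number")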
SOURCE = "test" # @param {type:"Query"|"CollectionGroup"|"DocumentReference"|"string"}
get_ipython().run_line_magic('pip', 'install --upgrade --quiet langchain-google-datastore')
PROJECT_ID = "my-project-id" # @param {type:"string"}
get_ipython().system('gcloud config set project {PROJECT_ID}')
from google.colab import auth
auth.authenticate_user()
get_ipython().system('gcloud services enable datastore.googleapis.com')
from langchain_core.documents import Document
from langchain_google_datastore import DatastoreSaver
data = [Document(page_content="Hello, World!")]
saver = DatastoreSaver()
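# Hedged sketch: write the documents into Datastore; upsert_documents is assumed to
# mirror the equivalent method in the Firestore saver shown earlier.
saver.upsert_documents(data)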
get_ipython().run_line_magic('pip', 'install --upgrade --quiet xata langchain-openai tiktoken langchain')
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
api_key = getpass.getpass("Xata API key: ")
db_url = input("Xata database URL (copy it from your DB settings):")
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores.xata import XataVectorStore
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
loader = TextLoader("../../modules/state_of_the_union.txt")
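# Hedged sketch: split the document and build the Xata-backed vector store; the
# table_name value is an assumption.
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
vector_store = XataVectorStore.from_documents(
    docs, embeddings, api_key=api_key, db_url=db_url, table_name="vectors"
)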
from langchain.chains import LLMMathChain
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
from langchain_core.tools import Tool
from langchain_experimental.plan_and_execute import (
PlanAndExecute,
load_agent_executor,
load_chat_planner,
)
from langchain_openai import ChatOpenAI, OpenAI
search = DuckDuckGoSearchAPIWrapper()
llm = OpenAI(temperature=0)
llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
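# Hedged sketch of the rest of the plan-and-execute setup: expose search and the math
# chain as tools, build the planner and executor, then run the agent.
tools = [
    Tool(
        name="Search",
        func=search.run,
        description="useful for answering questions about current events",
    ),
    Tool(
        name="Calculator",
        func=llm_math_chain.run,
        description="useful for answering questions about math",
    ),
]
model = ChatOpenAI(temperature=0)
planner = load_chat_planner(model)
executor = load_agent_executor(model, tools, verbose=True)
agent = PlanAndExecute(planner=planner, executor=executor)
agent.run("What is the population of Canada raised to the 0.43 power?")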
get_ipython().run_line_magic('pip', "install --upgrade --quiet langchain-openai 'deeplake[enterprise]' tiktoken")
from langchain_community.vectorstores import DeepLake
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
import getpass
import os
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
activeloop_token = getpass.getpass("activeloop token:")
embeddings = OpenAIEmbeddings()
from langchain_community.document_loaders import TextLoader
loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = DeepLake(dataset_path="./my_deeplake/", embedding=embeddings, overwrite=True)
db.add_documents(docs)
query = "What did the president say about Ketanji Brown Jackson"
docs = db.similarity_search(query)
print(docs[0].page_content)
db = DeepLake(dataset_path="./my_deeplake/", embedding=embeddings, read_only=True)
docs = db.similarity_search(query)
from langchain.chains import RetrievalQA
from langchain_openai import OpenAIChat
qa = RetrievalQA.from_chain_type(
    llm=OpenAIChat(model="gpt-3.5-turbo"),
    chain_type="stuff",
    retriever=db.as_retriever(),
)
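# Hedged usage sketch: answer the earlier query through the retrieval chain.
qa.run(query)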