get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks')
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface')
import nest_asyncio
nest_asyncio.apply()
import os
HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
import pandas as pd
def display_eval_df(question, source, answer_a, answer_b, result) -> None:
"""Pretty print question/answer + gpt-4 judgement dataset."""
eval_df = pd.DataFrame(
{
"Question": question,
"Source": source,
"Model A": answer_a["model"],
"Answer A": answer_a["text"],
"Model B": answer_b["model"],
"Answer B": answer_b["text"],
"Score": result.score,
"Judgement": result.feedback,
},
index=[0],
)
eval_df = eval_df.style.set_properties(
**{
"inline-size": "300px",
"overflow-wrap": "break-word",
},
subset=["Answer A", "Answer B"]
)
display(eval_df)
get_ipython().system('pip install wikipedia -q')
from llama_index.readers.wikipedia import WikipediaReader
train_cities = [
"San Francisco",
"Toronto",
"New York",
"Vancouver",
"Montreal",
"Boston",
]
test_cities = [
"Tokyo",
"Singapore",
"Paris",
]
train_documents = WikipediaReader().load_data(
pages=[f"History of {x}" for x in train_cities]
)
test_documents = WikipediaReader().load_data(
pages=[f"History of {x}" for x in test_cities]
)
QUESTION_GEN_PROMPT = (
"You are a Teacher/ Professor. Your task is to setup "
"a quiz/examination. Using the provided context, formulate "
"a single question that captures an important fact from the "
"context. Restrict the question to the context information provided."
)
from llama_index.core.evaluation import DatasetGenerator
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3)
train_dataset_generator = DatasetGenerator.from_documents(
train_documents,
question_gen_query=QUESTION_GEN_PROMPT,
llm=llm,
show_progress=True,
num_questions_per_chunk=25,
)
test_dataset_generator = DatasetGenerator.from_documents(
test_documents,
question_gen_query=QUESTION_GEN_PROMPT,
llm=llm,
show_progress=True,
num_questions_per_chunk=25,
)
train_questions = train_dataset_generator.generate_questions_from_nodes(
num=200
)
test_questions = test_dataset_generator.generate_questions_from_nodes(num=150)
len(train_questions), len(test_questions)
train_questions[:3]
test_questions[:3]
from llama_index.core import VectorStoreIndex
from llama_index.core.retrievers import VectorIndexRetriever
train_index = VectorStoreIndex.from_documents(documents=train_documents)
train_retriever = VectorIndexRetriever(
index=train_index,
similarity_top_k=2,
)
test_index = VectorStoreIndex.from_documents(documents=test_documents)
test_retriever = VectorIndexRetriever(
index=test_index,
similarity_top_k=2,
)
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
def create_query_engine(
hf_name: str, retriever: VectorIndexRetriever, hf_llm_generators: dict
) -> RetrieverQueryEngine:
"""Create a RetrieverQueryEngine using the HuggingFaceInferenceAPI LLM"""
if hf_name not in hf_llm_generators:
raise KeyError("model not listed in hf_llm_generators")
llm = HuggingFaceInferenceAPI(
model_name=hf_llm_generators[hf_name],
context_window=2048, # to use refine
token=HUGGING_FACE_TOKEN,
)
return RetrieverQueryEngine.from_args(retriever=retriever, llm=llm)
hf_llm_generators = {
"mistral-7b-instruct": "mistralai/Mistral-7B-Instruct-v0.1",
"llama2-7b-chat": "meta-llama/Llama-2-7b-chat-hf",
}
train_query_engines = {
mdl: create_query_engine(mdl, train_retriever, hf_llm_generators)
for mdl in hf_llm_generators.keys()
}
test_query_engines = {
mdl: create_query_engine(mdl, test_retriever, hf_llm_generators)
for mdl in hf_llm_generators.keys()
}
import tqdm
import random
train_dataset = []
for q in tqdm.tqdm(train_questions):
model_versus = random.sample(list(train_query_engines.items()), 2)
data_entry = {"question": q}
responses = []
source = None
for name, engine in model_versus:
response = engine.query(q)
response_struct = {}
response_struct["model"] = name
response_struct["text"] = str(response)
if source is not None:
assert source == response.source_nodes[0].node.text[:1000] + "..."
else:
source = response.source_nodes[0].node.text[:1000] + "..."
responses.append(response_struct)
data_entry["answers"] = responses
data_entry["source"] = source
train_dataset.append(data_entry)
from llama_index.llms.openai import OpenAI
from llama_index.finetuning.callbacks import OpenAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
from llama_index.core.evaluation import PairwiseComparisonEvaluator
from llama_index.core import Settings
main_finetuning_handler = OpenAIFineTuningHandler()
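# A hedged sketch of how the handler might be wired up; the judge model and
# evaluator construction below are assumptions, not the original cell.
Settings.callback_manager = CallbackManager([main_finetuning_handler])
gpt4_judge = OpenAI(model="gpt-4", temperature=0)
gpt4_evaluator = PairwiseComparisonEvaluator(llm=gpt4_judge)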
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-postgres')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-llms-llama-cpp')
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en")
get_ipython().system('pip install llama-cpp-python')
from llama_index.llms.llama_cpp import LlamaCPP
model_url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF/resolve/main/llama-2-13b-chat.Q4_0.gguf"
llm = LlamaCPP(
model_url=model_url,
model_path=None,
temperature=0.1,
max_new_tokens=256,
context_window=3900,
generate_kwargs={},
model_kwargs={"n_gpu_layers": 1},
verbose=True,
)
get_ipython().system('pip install psycopg2-binary pgvector asyncpg "sqlalchemy[asyncio]" greenlet')
import psycopg2
db_name = "vector_db"
host = "localhost"
password = "password"
port = "5432"
user = "jerry"
conn = psycopg2.connect(
dbname="postgres",
host=host,
password=password,
port=port,
user=user,
)
conn.autocommit = True
with conn.cursor() as c:
c.execute(f"DROP DATABASE IF EXISTS {db_name}")
c.execute(f"CREATE DATABASE {db_name}")
from sqlalchemy import make_url
from llama_index.vector_stores.postgres import PGVectorStore
vector_store = PGVectorStore.from_params(
database=db_name,
host=host,
password=password,
port=port,
user=user,
table_name="llama2_paper",
    embed_dim=384,  # bge-small-en embedding dimension
)
get_ipython().system('mkdir data')
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
documents = loader.load(file_path="./data/llama2.pdf")
from llama_index.core.node_parser import SentenceSplitter
text_parser = SentenceSplitter(
chunk_size=1024,
)
text_chunks = []
doc_idxs = []
for doc_idx, doc in enumerate(documents):
cur_text_chunks = text_parser.split_text(doc.text)
text_chunks.extend(cur_text_chunks)
doc_idxs.extend([doc_idx] * len(cur_text_chunks))
from llama_index.core.schema import TextNode
nodes = []
for idx, text_chunk in enumerate(text_chunks):
node = TextNode(
text=text_chunk,
)
src_doc = documents[doc_idxs[idx]]
node.metadata = src_doc.metadata
nodes.append(node)
for node in nodes:
node_embedding = embed_model.get_text_embedding(
node.get_content(metadata_mode="all")
)
node.embedding = node_embedding
vector_store.add(nodes)
query_str = "Can you tell me about the key concepts for safety finetuning"
query_embedding = embed_model.get_query_embedding(query_str)
from llama_index.core.vector_stores import VectorStoreQuery
query_mode = "default"
vector_store_query = VectorStoreQuery(
query_embedding=query_embedding, similarity_top_k=2, mode=query_mode
)
query_result = vector_store.query(vector_store_query)
print(query_result.nodes[0].get_content())
from llama_index.core.schema import NodeWithScore
from typing import Optional
nodes_with_scores = []
for index, node in enumerate(query_result.nodes):
score: Optional[float] = None
if query_result.similarities is not None:
score = query_result.similarities[index]
nodes_with_scores.append(NodeWithScore(node=node, score=score))
from llama_index.core import QueryBundle
from llama_index.core.retrievers import BaseRetriever
from typing import Any, List
class VectorDBRetriever(BaseRetriever):
"""Retriever over a postgres vector store."""
def __init__(
self,
vector_store: PGVectorStore,
embed_model: Any,
query_mode: str = "default",
similarity_top_k: int = 2,
) -> None:
"""Init params."""
self._vector_store = vector_store
self._embed_model = embed_model
self._query_mode = query_mode
self._similarity_top_k = similarity_top_k
super().__init__()
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve."""
        query_embedding = self._embed_model.get_query_embedding(
query_bundle.query_str
)
vector_store_query = VectorStoreQuery(
query_embedding=query_embedding,
similarity_top_k=self._similarity_top_k,
mode=self._query_mode,
)
        query_result = self._vector_store.query(vector_store_query)
nodes_with_scores = []
for index, node in enumerate(query_result.nodes):
score: Optional[float] = None
if query_result.similarities is not None:
score = query_result.similarities[index]
nodes_with_scores.append(NodeWithScore(node=node, score=score))
return nodes_with_scores
retriever = VectorDBRetriever(
vector_store, embed_model, query_mode="default", similarity_top_k=2
)
from llama_index.core.query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine.from_args(retriever, llm=llm)
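# Assumed usage sketch: run the query defined earlier through the engine.
response = query_engine.query(query_str)
print(str(response))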
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
from pydantic import BaseModel
from unstructured.partition.html import partition_html
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm')
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
from utils import get_train_str, get_train_and_eval_data, get_eval_preds, train_prompt
import warnings
warnings.filterwarnings("ignore")
warnings.simplefilter("ignore")
train_df, train_labels, eval_df, eval_labels = get_train_and_eval_data("data/train.csv")
print(train_prompt.template)
train_n = 10
eval_n = 40
train_str = get_train_str(train_df, train_labels, train_n=train_n)
print(f"Example datapoints in `train_str`: \n{train_str}")
from sklearn.metrics import accuracy_score
import numpy as np
eval_preds = get_eval_preds(train_prompt, train_str, eval_df, n=eval_n)
eval_label_chunk = eval_labels[:eval_n]
acc = accuracy_score(eval_label_chunk, np.array(eval_preds).round())
print(f"ACCURACY: {acc}")
from sklearn.metrics import accuracy_score
import numpy as np
eval_preds_null = get_eval_preds(train_prompt, "", eval_df, n=eval_n)
eval_label_chunk = eval_labels[:eval_n]
acc_null = accuracy_score(eval_label_chunk, np.array(eval_preds_null).round())
print(f"ACCURACY: {acc_null}")
from llama_index import SummaryIndex
from llama_index.schema import Document
index = SummaryIndex([])
batch_size = 40
num_train_chunks = 5
for i in range(num_train_chunks):
print(f"Inserting chunk: {i}/{num_train_chunks}")
start_idx = i * batch_size
end_idx = (i + 1) * batch_size
    train_batch = train_df.iloc[start_idx:end_idx]
    labels_batch = train_labels.iloc[start_idx:end_idx]
all_train_str = get_train_str(train_batch, labels_batch, train_n=batch_size)
    index.insert(Document(text=all_train_str))
from llama_index.core import SQLDatabase
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///chinook.db")
sql_database = SQLDatabase(engine)
from llama_index.core.query_pipeline import QueryPipeline
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -o ./chinook.zip')
get_ipython().system('unzip ./chinook.zip')
from llama_index.core.settings import Settings
from llama_index.core.callbacks import CallbackManager
callback_manager = CallbackManager()
Settings.callback_manager = callback_manager
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.query_engine import NLSQLTableQueryEngine
from llama_index.core.tools import QueryEngineTool
sql_query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["albums", "tracks", "artists"],
verbose=True,
)
sql_tool = QueryEngineTool.from_defaults(
query_engine=sql_query_engine,
name="sql_tool",
description=(
"Useful for translating a natural language query into a SQL query"
),
)
from llama_index.core.query_pipeline import QueryPipeline as QP
qp = QP(verbose=True)
from llama_index.core.agent.react.types import (
ActionReasoningStep,
ObservationReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.agent import Task, AgentChatResponse
from llama_index.core.query_pipeline import (
AgentInputComponent,
AgentFnComponent,
CustomAgentComponent,
QueryComponent,
ToolRunnerComponent,
)
from llama_index.core.llms import MessageRole
from typing import Dict, Any, Optional, Tuple, List, cast
def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]:
"""Agent input function.
Returns:
A Dictionary of output keys and values. If you are specifying
src_key when defining links between this component and other
components, make sure the src_key matches the specified output_key.
"""
if "current_reasoning" not in state:
state["current_reasoning"] = []
reasoning_step = ObservationReasoningStep(observation=task.input)
state["current_reasoning"].append(reasoning_step)
return {"input": task.input}
agent_input_component = AgentInputComponent(fn=agent_input_fn)
from llama_index.core.agent import ReActChatFormatter
from llama_index.core.query_pipeline import InputComponent, Link
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool
def react_prompt_fn(
task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool]
) -> List[ChatMessage]:
chat_formatter = ReActChatFormatter()
return chat_formatter.format(
tools,
chat_history=task.memory.get() + state["memory"].get_all(),
current_reasoning=state["current_reasoning"],
)
react_prompt_component = AgentFnComponent(
fn=react_prompt_fn, partial_dict={"tools": [sql_tool]}
)
from typing import Set, Optional
from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.llms import ChatResponse
from llama_index.core.agent.types import Task
def parse_react_output_fn(
task: Task, state: Dict[str, Any], chat_response: ChatResponse
):
"""Parse ReAct output into a reasoning step."""
output_parser = ReActOutputParser()
reasoning_step = output_parser.parse(chat_response.message.content)
return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step}
parse_react_output = AgentFnComponent(fn=parse_react_output_fn)
def run_tool_fn(
task: Task, state: Dict[str, Any], reasoning_step: ActionReasoningStep
):
"""Run tool and process tool output."""
tool_runner_component = ToolRunnerComponent(
[sql_tool], callback_manager=task.callback_manager
)
tool_output = tool_runner_component.run_component(
tool_name=reasoning_step.action,
tool_input=reasoning_step.action_input,
)
observation_step = ObservationReasoningStep(observation=str(tool_output))
state["current_reasoning"].append(observation_step)
return {"response_str": observation_step.get_content(), "is_done": False}
run_tool = AgentFnComponent(fn=run_tool_fn)
def process_response_fn(
task: Task, state: Dict[str, Any], response_step: ResponseReasoningStep
):
"""Process response."""
state["current_reasoning"].append(response_step)
response_str = response_step.response
state["memory"].put(ChatMessage(content=task.input, role=MessageRole.USER))
state["memory"].put(
ChatMessage(content=response_str, role=MessageRole.ASSISTANT)
)
return {"response_str": response_str, "is_done": True}
process_response = AgentFnComponent(fn=process_response_fn)
def process_agent_response_fn(
task: Task, state: Dict[str, Any], response_dict: dict
):
"""Process agent response."""
return (
AgentChatResponse(response_dict["response_str"]),
response_dict["is_done"],
)
process_agent_response = AgentFnComponent(fn=process_agent_response_fn)
from llama_index.core.query_pipeline import QueryPipeline as QP
from llama_index.llms.openai import OpenAI
qp.add_modules(
{
"agent_input": agent_input_component,
"react_prompt": react_prompt_component,
"llm": OpenAI(model="gpt-4-1106-preview"),
"react_output_parser": parse_react_output,
"run_tool": run_tool,
"process_response": process_response,
"process_agent_response": process_agent_response,
}
)
qp.add_chain(["agent_input", "react_prompt", "llm", "react_output_parser"])
qp.add_link(
"react_output_parser",
"run_tool",
condition_fn=lambda x: not x["done"],
input_fn=lambda x: x["reasoning_step"],
)
qp.add_link(
"react_output_parser",
"process_response",
condition_fn=lambda x: x["done"],
input_fn=lambda x: x["reasoning_step"],
)
qp.add_link("process_response", "process_agent_response")
qp.add_link("run_tool", "process_agent_response")
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(qp.clean_dag)
net.show("agent_dag.html")
from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner
from llama_index.core.callbacks import CallbackManager
agent_worker = QueryPipelineAgentWorker(qp)
agent = AgentRunner(
agent_worker, callback_manager=CallbackManager([]), verbose=True
)
task = agent.create_task(
"What are some tracks from the artist AC/DC? Limit it to 3"
)
step_output = agent.run_step(task.task_id)
step_output = agent.run_step(task.task_id)
step_output.is_last
response = agent.finalize_response(task.task_id)
print(str(response))
agent.reset()
response = agent.chat(
"What are some tracks from the artist AC/DC? Limit it to 3"
)
print(str(response))
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-4-1106-preview")
from llama_index.core.agent import Task, AgentChatResponse
from typing import Dict, Any
from llama_index.core.query_pipeline import (
AgentInputComponent,
AgentFnComponent,
)
def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict:
"""Agent input function."""
if "convo_history" not in state:
state["convo_history"] = []
state["count"] = 0
state["convo_history"].append(f"User: {task.input}")
convo_history_str = "\n".join(state["convo_history"]) or "None"
return {"input": task.input, "convo_history": convo_history_str}
agent_input_component = AgentInputComponent(fn=agent_input_fn)
from llama_index.core import PromptTemplate
retry_prompt_str = """\
You are trying to generate a proper natural language query given a user input.
This query will then be interpreted by a downstream text-to-SQL agent which
will convert the query to a SQL statement. If the agent triggers an error,
then that will be reflected in the current conversation history (see below).
If the conversation history is None, use the user input. If it's not None,
generate a new natural language query that avoids the problems of the previous query.
Input: {input}
Convo history (failed attempts):
{convo_history}
New input: """
retry_prompt = PromptTemplate(retry_prompt_str)
from llama_index.core import Response
from typing import Tuple
validate_prompt_str = """\
Given the user query, validate whether the inferred SQL query and response from executing the query is correct and answers the query.
Answer with YES or NO.
Query: {input}
Inferred SQL query: {sql_query}
SQL Response: {sql_response}
Result: """
validate_prompt = PromptTemplate(validate_prompt_str)
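# Hedged sketch of invoking the validation prompt; the example query, SQL
# statement, and result string below are illustrative assumptions.
fmt_validate = validate_prompt.format(
    input="How many albums are there?",
    sql_query="SELECT COUNT(*) FROM albums",
    sql_response="347",
)
is_valid = "YES" in llm.complete(fmt_validate).text.upper()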
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().system('pip install -q llama-index google-generativeai')
get_ipython().run_line_magic('env', 'GOOGLE_API_KEY=...')
import os
GOOGLE_API_KEY = "" # add your GOOGLE API key here
os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
from llama_index.llms.gemini import Gemini
resp = Gemini()
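# Hedged usage sketch: Gemini implements the standard LlamaIndex LLM
# interface, so a completion call would look like this (prompt text is an
# assumption).
resp = Gemini().complete("Write a poem about a magic backpack")
print(resp)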
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-lantern')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install psycopg2-binary llama-index asyncpg')
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.lantern import LanternVectorStore
import textwrap
import openai
import os
os.environ["OPENAI_API_KEY"] = "<your_key>"
openai.api_key = "<your_key>"
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
print("Document ID:", documents[0].doc_id)
import psycopg2
connection_string = "postgresql://postgres:postgres@localhost:5432"
db_name = "postgres"
conn = psycopg2.connect(connection_string)
conn.autocommit = True
with conn.cursor() as c:
c.execute(f"DROP DATABASE IF EXISTS {db_name}")
c.execute(f"CREATE DATABASE {db_name}")
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core import Settings
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
from sqlalchemy import make_url
url = make_url(connection_string)
vector_store = LanternVectorStore.from_params(
database=db_name,
host=url.host,
password=url.password,
port=url.port,
user=url.username,
table_name="paul_graham_essay",
embed_dim=1536, # openai embedding dimension
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
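# Sketch of the assumed continuation: build the index over the documents and
# run a sample query.
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(textwrap.fill(str(response), 100))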
get_ipython().system('pip install llama-index')
import openai
import os
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
openai.api_key = os.environ["OPENAI_API_KEY"]
from typing import Any, List
from InstructorEmbedding import INSTRUCTOR
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.embeddings import BaseEmbedding
class InstructorEmbeddings(BaseEmbedding):
_model: INSTRUCTOR = PrivateAttr()
    _instruction: str = PrivateAttr()
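    # Hedged sketch of how this custom embedding class might continue; the
    # model name, instruction text, and defaults below are assumptions.
    def __init__(
        self,
        instructor_model_name: str = "hkunlp/instructor-large",
        instruction: str = "Represent a document for semantic search:",
        **kwargs: Any,
    ) -> None:
        super().__init__(**kwargs)
        self._model = INSTRUCTOR(instructor_model_name)
        self._instruction = instruction

    def _get_query_embedding(self, query: str) -> List[float]:
        return self._model.encode([[self._instruction, query]])[0].tolist()

    def _get_text_embedding(self, text: str) -> List[float]:
        return self._model.encode([[self._instruction, text]])[0].tolist()

    async def _aget_query_embedding(self, query: str) -> List[float]:
        return self._get_query_embedding(query)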
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().system('pip install llama-index')
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
pprint_response,
)
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents=documents)
import os
from llama_index.postprocessor.cohere_rerank import CohereRerank
api_key = os.environ["COHERE_API_KEY"]
cohere_rerank = CohereRerank(api_key=api_key, top_n=2)
query_engine = index.as_query_engine(
similarity_top_k=10,
node_postprocessors=[cohere_rerank],
)
response = query_engine.query(
"What did Sam Altman do in this essay?",
)
pprint_response(response)
query_engine = index.as_query_engine(
similarity_top_k=2,
)
response = query_engine.query(
"What did Sam Altman do in this essay?",
)
pprint_response(response)
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google')
get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google')
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"')
get_ipython().run_line_magic('pip', 'install google-auth-oauthlib')
from google.oauth2 import service_account
from llama_index.vector_stores.google import set_google_config
credentials = service_account.Credentials.from_service_account_file(
"service_account_key.json",
scopes=[
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix
from typing import Iterable
from random import randrange
LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX = "llama-index-colab"
SESSION_CORPUS_ID_PREFIX = (
f"{LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX}-{randrange(1000000)}"
)
def corpus_id(num_id: int) -> str:
return f"{SESSION_CORPUS_ID_PREFIX}-{num_id}"
SESSION_CORPUS_ID = corpus_id(1)
def list_corpora() -> Iterable[genaix.Corpus]:
client = genaix.build_semantic_retriever()
yield from genaix.list_corpora(client=client)
def delete_corpus(*, corpus_id: str) -> None:
client = genaix.build_semantic_retriever()
genaix.delete_corpus(corpus_id=corpus_id, client=client)
def cleanup_colab_corpora():
for corpus in list_corpora():
if corpus.corpus_id.startswith(LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX):
try:
delete_corpus(corpus_id=corpus.corpus_id)
print(f"Deleted corpus {corpus.corpus_id}.")
except Exception:
pass
cleanup_colab_corpora()
from llama_index.core import SimpleDirectoryReader
from llama_index.indices.managed.google import GoogleIndex
from llama_index.core import Response
import time
index = GoogleIndex.create_corpus(
corpus_id=SESSION_CORPUS_ID, display_name="My first corpus!"
)
print(f"Newly created corpus ID is {index.corpus_id}.")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index.insert_documents(documents)
for corpus in list_corpora():
print(corpus)
query_engine = index.as_query_engine()
response = query_engine.query("What did Paul Graham do growing up?")
assert isinstance(response, Response)
print(f"Response is {response.response}")
for cited_text in [node.text for node in response.source_nodes]:
print(f"Cited text: {cited_text}")
if response.metadata:
print(
f"Answerability: {response.metadata.get('answerable_probability', 0)}"
)
index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
query_engine = index.as_query_engine()
response = query_engine.query("Which company did Paul Graham build?")
assert isinstance(response, Response)
print(f"Response is {response.response}")
from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode
index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
index.insert_nodes(
[
TextNode(
text="It was the best of times.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="123",
metadata={"file_name": "Tale of Two Cities"},
)
},
),
TextNode(
text="It was the worst of times.",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="123",
metadata={"file_name": "Tale of Two Cities"},
)
},
),
TextNode(
text="Bugs Bunny: Wassup doc?",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="456",
metadata={"file_name": "Bugs Bunny Adventure"},
)
},
),
]
)
from google.ai.generativelanguage import (
GenerateAnswerRequest,
HarmCategory,
SafetySetting,
)
index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
query_engine = index.as_query_engine(
temperature=0.2,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
SafetySetting(
category=HarmCategory.HARM_CATEGORY_VIOLENCE,
threshold=SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH,
),
],
)
response = query_engine.query("What was Bugs Bunny's favorite saying?")
print(response)
from llama_index.core import Response
response = query_engine.query("What were Paul Graham's achievements?")
assert isinstance(response, Response)
print(f"Response is {response.response}")
for cited_text in [node.text for node in response.source_nodes]:
print(f"Cited text: {cited_text}")
if response.metadata:
print(
f"Answerability: {response.metadata.get('answerable_probability', 0)}"
)
from llama_index.llms.gemini import Gemini
GEMINI_API_KEY = "" # @param {type:"string"}
gemini = Gemini(api_key=GEMINI_API_KEY)
from llama_index.response_synthesizers.google import GoogleTextSynthesizer
from llama_index.vector_stores.google import GoogleVectorStore
from llama_index.core import VectorStoreIndex
from llama_index.core.postprocessor import LLMRerank
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID)
index = VectorStoreIndex.from_vector_store(
vector_store=store,
)
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.2,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
)
reranker = LLMRerank(
top_n=10,
llm=gemini,
)
query_engine = RetrieverQueryEngine.from_args(
retriever=VectorIndexRetriever(
index=index,
similarity_top_k=20,
),
node_postprocessors=[reranker],
response_synthesizer=response_synthesizer,
)
response = query_engine.query("What were Paul Graham's achievements?")
print(response)
from llama_index.core.indices.query.query_transform.base import (
StepDecomposeQueryTransform,
)
from llama_index.core.query_engine import MultiStepQueryEngine
store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID)
index = VectorStoreIndex.from_vector_store(
vector_store=store,
)
response_synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.2,
answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
)
single_step_query_engine = index.as_query_engine(
similarity_top_k=10,
response_synthesizer=response_synthesizer,
)
step_decompose_transform = StepDecomposeQueryTransform(
llm=gemini,
verbose=True,
)
query_engine = MultiStepQueryEngine(
query_engine=single_step_query_engine,
query_transform=step_decompose_transform,
response_synthesizer=response_synthesizer,
index_summary="Ask me anything.",
num_steps=6,
)
response = query_engine.query("What were Paul Graham's achievements?")
print(response)
from llama_index.core.indices.query.query_transform import HyDEQueryTransform
from llama_index.core.query_engine import TransformQueryEngine
store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID)
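# Sketch of the assumed continuation: wrap a base query engine with HyDE
# using the transforms imported above.
index = VectorStoreIndex.from_vector_store(vector_store=store)
base_query_engine = index.as_query_engine(
    response_synthesizer=response_synthesizer,
)
hyde_transform = HyDEQueryTransform(include_original=True)
hyde_query_engine = TransformQueryEngine(base_query_engine, hyde_transform)
print(hyde_query_engine.query("What were Paul Graham's achievements?"))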
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().system('pip install llama-index qdrant_client')
import qdrant_client
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.qdrant import QdrantVectorStore
client = qdrant_client.QdrantClient(
location=":memory:"
)
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
"year": 1994,
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
"year": 1972,
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
"theme": "Fiction",
"year": 2010,
},
),
TextNode(
text="To Kill a Mockingbird",
metadata={
"author": "Harper Lee",
"theme": "Mafia",
"year": 1960,
},
),
TextNode(
text="1984",
metadata={
"author": "George Orwell",
"theme": "Totalitarianism",
"year": 1949,
},
),
TextNode(
text="The Great Gatsby",
metadata={
"author": "F. Scott Fitzgerald",
"theme": "The American Dream",
"year": 1925,
},
),
TextNode(
text="Harry Potter and the Sorcerer's Stone",
metadata={
"author": "J.K. Rowling",
"theme": "Fiction",
"year": 1997,
},
),
]
import os
from llama_index.core import StorageContext
os.environ["OPENAI_API_KEY"] = "sk-..."
vector_store = QdrantVectorStore(
client=client, collection_name="test_collection_1"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
from llama_index.core.vector_stores import (
MetadataFilter,
MetadataFilters,
FilterOperator,
)
filters = MetadataFilters(
filters=[
MetadataFilter(key="theme", operator=FilterOperator.EQ, value="Mafia"),
]
)
retriever = index.as_retriever(filters=filters)
retriever.retrieve("What is inception about?")
from llama_index.core.vector_stores import FilterOperator, FilterCondition
filters = MetadataFilters(
filters=[
        MetadataFilter(key="theme", value="Fiction"),
    ]
)
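# Hedged sketch: FilterCondition (imported above) can combine several
# filters; the year cutoff and AND condition here are illustrative
# assumptions.
combined_filters = MetadataFilters(
    filters=[
        MetadataFilter(key="theme", value="Fiction"),
        MetadataFilter(key="year", operator=FilterOperator.GT, value=1997),
    ],
    condition=FilterCondition.AND,
)
retriever = index.as_retriever(filters=combined_filters)
retriever.retrieve("Harry Potter?")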
get_ipython().run_line_magic('pip', 'install llama-index-readers-slack')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SummaryIndex
from llama_index.readers.slack import SlackReader
from IPython.display import Markdown, display
import os
slack_token = os.getenv("SLACK_BOT_TOKEN")
channel_ids = ["<channel_id>"]
documents = SlackReader(slack_token=slack_token).load_data(
    channel_ids=channel_ids
)
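# Sketch of the assumed continuation: summarize the loaded channel messages.
index = SummaryIndex.from_documents(documents)
query_engine = index.as_query_engine()
response = query_engine.query("<query_text>")
display(Markdown(f"<b>{response}</b>"))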
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
load_index_from_storage,
StorageContext,
)
from IPython.display import Markdown, display
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(response_mode="tree_summarize")
def display_prompt_dict(prompts_dict):
for k, p in prompts_dict.items():
text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>"
display(Markdown(text_md))
print(p.get_template())
display(Markdown("<br><br>"))
prompts_dict = query_engine.get_prompts()
display_prompt_dict(prompts_dict)
prompts_dict = query_engine.response_synthesizer.get_prompts()
display_prompt_dict(prompts_dict)
query_engine = index.as_query_engine(response_mode="compact")
prompts_dict = query_engine.get_prompts()
display_prompt_dict(prompts_dict)
response = query_engine.query("What did the author do growing up?")
print(str(response))
from llama_index.core import PromptTemplate
query_engine = index.as_query_engine(response_mode="tree_summarize")
new_summary_tmpl_str = (
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"answer the query in the style of a Shakespeare play.\n"
"Query: {query_str}\n"
"Answer: "
)
new_summary_tmpl = PromptTemplate(new_summary_tmpl_str)
query_engine.update_prompts(
{"response_synthesizer:summary_template": new_summary_tmpl}
)
prompts_dict = query_engine.get_prompts()
display_prompt_dict(prompts_dict)
response = query_engine.query("What did the author do growing up?")
print(str(response))
from llama_index.core.query_engine import (
RouterQueryEngine,
FLAREInstructQueryEngine,
)
from llama_index.core.selectors import LLMMultiSelector
from llama_index.core.evaluation import FaithfulnessEvaluator, DatasetGenerator
from llama_index.core.postprocessor import LLMRerank
from llama_index.core.tools import QueryEngineTool
query_tool = QueryEngineTool.from_defaults(
query_engine=query_engine, description="test description"
)
router_query_engine = RouterQueryEngine.from_defaults([query_tool])
prompts_dict = router_query_engine.get_prompts()
display_prompt_dict(prompts_dict)
flare_query_engine = FLAREInstructQueryEngine(query_engine)
prompts_dict = flare_query_engine.get_prompts()
display_prompt_dict(prompts_dict)
from llama_index.core.selectors import LLMSingleSelector
selector = LLMSingleSelector.from_defaults()
prompts_dict = selector.get_prompts()
display_prompt_dict(prompts_dict)
evaluator = FaithfulnessEvaluator()
prompts_dict = evaluator.get_prompts()
display_prompt_dict(prompts_dict)
dataset_generator = DatasetGenerator.from_documents(documents)
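# Hedged continuation: the dataset generator is also a prompt-bearing module,
# so its prompts can be displayed the same way as the modules above.
prompts_dict = dataset_generator.get_prompts()
display_prompt_dict(prompts_dict)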
get_ipython().run_line_magic('pip', 'install llama-index-llms-bedrock')
get_ipython().system('pip install llama-index')
from llama_index.llms.bedrock import Bedrock
profile_name = "Your aws profile name"
resp = Bedrock(
model="amazon.titan-text-express-v1", profile_name=profile_name
).complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.bedrock import Bedrock
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="Tell me a story"),
]
resp = Bedrock(
model="amazon.titan-text-express-v1", profile_name=profile_name
).chat(messages)
print(resp)
from llama_index.llms.bedrock import Bedrock
llm = Bedrock(model="amazon.titan-text-express-v1", profile_name=profile_name)
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
print(r.delta, end="")
from llama_index.llms.bedrock import Bedrock
llm = Bedrock(model="amazon.titan-text-express-v1", profile_name=profile_name)
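# Hedged sketch: streaming chat mirrors the streaming completion above;
# `messages` is the pirate conversation defined earlier.
resp = llm.stream_chat(messages)
for r in resp:
    print(r.delta, end="")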
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
load_index_from_storage,
)
from llama_index.core.tools import QueryEngineTool, ToolMetadata
try:
storage_context = StorageContext.from_defaults(
persist_dir="./storage/lyft"
)
lyft_index = load_index_from_storage(storage_context)
storage_context = StorageContext.from_defaults(
persist_dir="./storage/uber"
)
    uber_index = load_index_from_storage(storage_context)
    index_loaded = True
except Exception:
    index_loaded = False
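# Sketch of the assumed continuation: expose each index as a query-engine
# tool (tool names and descriptions are illustrative assumptions).
query_engine_tools = [
    QueryEngineTool(
        query_engine=lyft_index.as_query_engine(similarity_top_k=3),
        metadata=ToolMetadata(
            name="lyft_10k",
            description="Provides information about Lyft financials",
        ),
    ),
    QueryEngineTool(
        query_engine=uber_index.as_query_engine(similarity_top_k=3),
        metadata=ToolMetadata(
            name="uber_10k",
            description="Provides information about Uber financials",
        ),
    ),
]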
get_ipython().run_line_magic('pip', 'install llama-index-readers-make-com')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.readers.make_com import MakeWrapper
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents=documents)
query_str = "What did the author do growing up?"
query_engine = index.as_query_engine()
response = query_engine.query(query_str)
wrapper = MakeWrapper()
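# Hedged usage sketch: MakeWrapper can forward the response to a Make.com
# webhook; the URL placeholder is to be filled in.
wrapper.pass_response_to_webhook("<webhook_url>", response, query_str)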
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().system('pip install llama-index')
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.evaluation.benchmarks import BeirEvaluator
from llama_index.core import VectorStoreIndex
def create_retriever(documents):
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
index = VectorStoreIndex.from_documents(
documents, embed_model=embed_model, show_progress=True
)
return index.as_retriever(similarity_top_k=30)
BeirEvaluator()
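# Hedged sketch of running the benchmark with the retriever factory; the BEIR
# dataset name and k values below are illustrative assumptions.
BeirEvaluator().run(
    create_retriever, datasets=["nfcorpus"], metrics_k_values=[3, 10, 30]
)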
get_ipython().run_line_magic('pip', 'install llama-index-evaluation-tonic-validate')
import json
import pandas as pd
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.evaluation.tonic_validate import (
AnswerConsistencyEvaluator,
AnswerSimilarityEvaluator,
AugmentationAccuracyEvaluator,
AugmentationPrecisionEvaluator,
RetrievalPrecisionEvaluator,
TonicValidateEvaluator,
)
question = "What makes Sam Altman a good founder?"
reference_answer = "He is smart and has a great force of will."
llm_answer = "He is a good founder because he is smart."
retrieved_context_list = [
"Sam Altman is a good founder. He is very smart.",
"What makes Sam Altman such a good founder is his great force of will.",
]
answer_similarity_evaluator = AnswerSimilarityEvaluator()
score = await answer_similarity_evaluator.aevaluate(
question,
llm_answer,
retrieved_context_list,
reference_response=reference_answer,
)
score
answer_consistency_evaluator = AnswerConsistencyEvaluator()
score = await answer_consistency_evaluator.aevaluate(
question, llm_answer, retrieved_context_list
)
score
augmentation_accuracy_evaluator = AugmentationAccuracyEvaluator()
score = await augmentation_accuracy_evaluator.aevaluate(
question, llm_answer, retrieved_context_list
)
score
augmentation_precision_evaluator = AugmentationPrecisionEvaluator()
score = await augmentation_precision_evaluator.aevaluate(
question, llm_answer, retrieved_context_list
)
score
retrieval_precision_evaluator = RetrievalPrecisionEvaluator()
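# Hedged continuation, mirroring the evaluator calls above.
score = await retrieval_precision_evaluator.aevaluate(
    question, llm_answer, retrieved_context_list
)
score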
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-llms-azure-openai')
get_ipython().run_line_magic('pip', 'install llama-index-graph-stores-nebula')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-azure-openai')
get_ipython().system('pip install llama-index')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
import logging
import sys
logging.basicConfig(
stream=sys.stdout, level=logging.INFO
) # logging.DEBUG for more verbose output
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(temperature=0, model="gpt-3.5-turbo")
Settings.chunk_size = 512
from llama_index.llms.azure_openai import AzureOpenAI
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
api_key = "<api-key>"
azure_endpoint = "https://<your-resource-name>.openai.azure.com/"
api_version = "2023-07-01-preview"
llm = AzureOpenAI(
model="gpt-35-turbo-16k",
deployment_name="my-custom-llm",
api_key=api_key,
azure_endpoint=azure_endpoint,
api_version=api_version,
)
embed_model = AzureOpenAIEmbedding(
model="text-embedding-ada-002",
deployment_name="my-custom-embedding",
api_key=api_key,
azure_endpoint=azure_endpoint,
api_version=api_version,
)
from llama_index.core import Settings
Settings.llm = llm
Settings.embed_model = embed_model
Settings.chunk_size = 512
get_ipython().run_line_magic('pip', 'install ipython-ngql nebula3-python')
os.environ["NEBULA_USER"] = "root"
os.environ["NEBULA_PASSWORD"] = "nebula" # default is "nebula"
os.environ[
"NEBULA_ADDRESS"
] = "127.0.0.1:9669" # assumed we have NebulaGraph installed locally
space_name = "llamaindex"
edge_types, rel_prop_names = ["relationship"], [
"relationship"
]  # default; can be omitted when creating from an empty kg
tags = ["entity"]  # default; can be omitted when creating from an empty kg
from llama_index.core import StorageContext
from llama_index.graph_stores.nebula import NebulaGraphStore
graph_store = NebulaGraphStore(
space_name=space_name,
edge_types=edge_types,
rel_prop_names=rel_prop_names,
tags=tags,
)
storage_context = StorageContext.from_defaults(graph_store=graph_store)
from llama_index.core import download_loader
from llama_index.readers.wikipedia import WikipediaReader
loader = WikipediaReader()
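# Sketch of the assumed continuation: pull a page to build the graph from
# (the page title is an illustrative assumption).
documents = loader.load_data(
    pages=["Guardians of the Galaxy Vol. 3"], auto_suggest=False
)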
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
from llama_index.core import SimpleDirectoryReader
from llama_index.core import SummaryIndex
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
wiki_titles = ["Michael Jordan", "Elon Musk", "Richard Branson", "Rihanna"]
wiki_metadatas = {
"Michael Jordan": {
"category": "Sports",
"country": "United States",
},
"Elon Musk": {
"category": "Business",
"country": "United States",
},
"Richard Branson": {
"category": "Business",
"country": "UK",
},
"Rihanna": {
"category": "Music",
"country": "Barbados",
},
}
from pathlib import Path
import requests
for title in wiki_titles:
response = requests.get(
"https://en.wikipedia.org/w/api.php",
params={
"action": "query",
"format": "json",
"titles": title,
"prop": "extracts",
"explaintext": True,
},
).json()
page = next(iter(response["query"]["pages"].values()))
wiki_text = page["extract"]
data_path = Path("data")
if not data_path.exists():
Path.mkdir(data_path)
with open(data_path / f"{title}.txt", "w") as fp:
fp.write(wiki_text)
docs_dict = {}
for wiki_title in wiki_titles:
doc = SimpleDirectoryReader(
input_files=[f"data/{wiki_title}.txt"]
).load_data()[0]
doc.metadata.update(wiki_metadatas[wiki_title])
docs_dict[wiki_title] = doc
from llama_index.llms.openai import OpenAI
from llama_index.core.callbacks import LlamaDebugHandler, CallbackManager
from llama_index.core.node_parser import SentenceSplitter
llm = OpenAI("gpt-4")
callback_manager = CallbackManager([LlamaDebugHandler()])
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
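# Sketch of the assumed continuation: embed locally with the HuggingFace
# model installed above and run a sample query.
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)
query_engine = index.as_query_engine()
print(query_engine.query("What did the author do growing up?"))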
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index')
import weaviate
client = weaviate.Client("https://test-cluster-bbn8vqsn.weaviate.network")
try:
client.schema.delete_class("Book")
except:
pass
schema = {
"classes": [
{
"class": "Book",
"properties": [
{"name": "title", "dataType": ["text"]},
{"name": "author", "dataType": ["text"]},
{"name": "content", "dataType": ["text"]},
{"name": "year", "dataType": ["int"]},
],
},
]
}
if not client.schema.contains(schema):
client.schema.create(schema)
books = [
{
"title": "To Kill a Mockingbird",
"author": "Harper Lee",
"content": (
"To Kill a Mockingbird is a novel by Harper Lee published in"
" 1960..."
),
"year": 1960,
},
{
"title": "1984",
"author": "George Orwell",
"content": (
"1984 is a dystopian novel by George Orwell published in 1949..."
),
"year": 1949,
},
{
"title": "The Great Gatsby",
"author": "F. Scott Fitzgerald",
"content": (
"The Great Gatsby is a novel by F. Scott Fitzgerald published in"
" 1925..."
),
"year": 1925,
},
{
"title": "Pride and Prejudice",
"author": "Jane Austen",
"content": (
"Pride and Prejudice is a novel by Jane Austen published in"
" 1813..."
),
"year": 1813,
},
]
from llama_index.embeddings.openai import OpenAIEmbedding
embed_model = OpenAIEmbedding()
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import os
import getpass
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
import chromadb
chroma_client = chromadb.EphemeralClient()
chroma_collection = chroma_client.create_collection("quickstart")
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text=(
"Michael Jordan is a retired professional basketball player,"
" widely regarded as one of the greatest basketball players of all"
" time."
),
metadata={
"category": "Sports",
"country": "United States",
},
),
TextNode(
text=(
"Angelina Jolie is an American actress, filmmaker, and"
" humanitarian. She has received numerous awards for her acting"
" and is known for her philanthropic work."
),
metadata={
"category": "Entertainment",
"country": "United States",
},
),
TextNode(
text=(
"Elon Musk is a business magnate, industrial designer, and"
" engineer. He is the founder, CEO, and lead designer of SpaceX,"
" Tesla, Inc., Neuralink, and The Boring Company."
),
metadata={
"category": "Business",
"country": "United States",
},
),
TextNode(
text=(
"Rihanna is a Barbadian singer, actress, and businesswoman. She"
" has achieved significant success in the music industry and is"
" known for her versatile musical style."
),
metadata={
"category": "Music",
"country": "Barbados",
},
),
TextNode(
text=(
"Cristiano Ronaldo is a Portuguese professional footballer who is"
" considered one of the greatest football players of all time. He"
" has won numerous awards and set multiple records during his"
" career."
),
metadata={
"category": "Sports",
"country": "Portugal",
},
),
]
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
get_ipython().run_line_magic('pip', 'install llama-index-llms-palm')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install -q google-generativeai')
import pprint
import google.generativeai as palm
palm_api_key = ""
palm.configure(api_key=palm_api_key)
models = [
m
for m in palm.list_models()
if "generateText" in m.supported_generation_methods
]
model = models[0].name
print(model)
from llama_index.llms.palm import PaLM
model = PaLM(api_key=palm_api_key)
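# Hedged usage sketch: PaLM implements the standard LLM interface, so a
# completion call would look like this (prompt text is an assumption).
resp = model.complete("Paul Graham is ")
print(resp)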
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-milvus')
get_ipython().system(' pip install llama-index')
import logging
import sys
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Document
from llama_index.vector_stores.milvus import MilvusVectorStore
from IPython.display import Markdown, display
import textwrap
import openai
openai.api_key = "sk-"
get_ipython().system(" mkdir -p 'data/paul_graham/'")
get_ipython().system(" wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print("Document ID:", documents[0].doc_id)
from llama_index.core import StorageContext
vector_store = MilvusVectorStore(dim=1536, overwrite=True)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author learn?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("What was a hard moment for the author?")
print(textwrap.fill(str(response), 100))
vector_store = MilvusVectorStore(dim=1536, overwrite=True)
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().system('pip install llama-index')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core import StorageContext
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from IPython.display import Markdown, display
import chromadb
import os
import getpass
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
chroma_client = chromadb.EphemeralClient()
chroma_collection = chroma_client.create_collection("quickstart")
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context, embed_model=embed_model
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
db = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db.get_or_create_collection("quickstart")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context, embed_model=embed_model
)
db2 = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db2.get_or_create_collection("quickstart")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
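# Sketch of the assumed continuation: rebuild the index directly from the
# persisted vector store and query it again.
index = VectorStoreIndex.from_vector_store(
    vector_store, embed_model=embed_model
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))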
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-replicate')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('pip install unstructured')
from unstructured.partition.html import partition_html
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1THe1qqM61lretr9N3BmINc_NWDvuthYf" -O shanghai.jpg')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1PDVCf_CzLWXNnNoRV8CFgoJxv6U0sHAO" -O tesla_supercharger.jpg')
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
import openai
openai.api_key = "sk-your-key"
from llama_index.agent import OpenAIAgent
from llama_index.tools.wolfram_alpha.base import WolframAlphaToolSpec
wolfram_spec = WolframAlphaToolSpec(app_id="your-key")
tools = wolfram_spec.to_tool_list()
agent = OpenAIAgent.from_tools(tools, verbose=True)
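# Hedged usage sketch (the question is an illustrative assumption):
agent.chat("How many calories are in 100 grams of milk chocolate?")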
from llama_index.agent import OpenAIAgent
import openai
openai.api_key = "sk-your-key"
from llama_index.tools.yelp.base import YelpToolSpec
from llama_index.tools.tool_spec.load_and_search.base import LoadAndSearchToolSpec
tool_spec = YelpToolSpec(api_key="your-key", client_id="your-id")
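# Sketch of the assumed continuation: wrap the first Yelp tool with
# load-and-search so large results are indexed before being read.
tools = tool_spec.to_tool_list()
agent = OpenAIAgent.from_tools(
    [*LoadAndSearchToolSpec.from_defaults(tools[0]).to_tool_list(), tools[1]],
    verbose=True,
)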
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-program-openai')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
from llama_index.program.openai import OpenAIPydanticProgram
from pydantic import BaseModel
from llama_index.llms.openai import OpenAI
from llama_index.finetuning.callbacks import OpenAIFineTuningHandler
from llama_index.core.callbacks import CallbackManager
from typing import List
class Song(BaseModel):
"""Data model for a song."""
title: str
length_seconds: int
class Album(BaseModel):
"""Data model for an album."""
name: str
artist: str
songs: List[Song]
finetuning_handler = OpenAIFineTuningHandler()
callback_manager = CallbackManager([finetuning_handler])
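# Illustrative next step (assumed): attach the callback manager to the LLM so the
# finetuning handler captures every call as training data.
llm = OpenAI(model="gpt-4", callback_manager=callback_manager)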
get_ipython().system('pip install llama-index-multi-modal-llms-ollama')
get_ipython().system('pip install llama-index-readers-file')
get_ipython().system('pip install unstructured')
get_ipython().system('pip install llama-index-embeddings-huggingface')
get_ipython().system('pip install llama-index-vector-stores-qdrant')
get_ipython().system('pip install llama-index-embeddings-clip')
from llama_index.multi_modal_llms.ollama import OllamaMultiModal
mm_model = OllamaMultiModal(model="llava:13b")
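# Minimal usage sketch (not in the original row); the "./images" directory is
# hypothetical and should contain the images to describe.
from llama_index.core import SimpleDirectoryReader
image_documents = SimpleDirectoryReader("./images").load_data()
response = mm_model.complete(
    prompt="Describe the image as an alternative text.",
    image_documents=image_documents,
)
print(response)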
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-.."
openai.api_key = os.environ["OPENAI_API_KEY"]
from IPython.display import Markdown, display
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
)
engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
from llama_index.core import SQLDatabase
from llama_index.llms.openai import OpenAI
llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo")
sql_database = SQLDatabase(engine, include_tables=["city_stats"])
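# Hedged sketch: query the table in natural language. Note the table is still
# empty at this point; rows would need to be inserted first for a useful answer.
from llama_index.core.query_engine import NLSQLTableQueryEngine
query_engine = NLSQLTableQueryEngine(
    sql_database=sql_database, tables=["city_stats"], llm=llm
)
response = query_engine.query("Which city has the highest population?")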
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip')
get_ipython().system('unzip data.zip')
import pandas as pd
from pathlib import Path
data_dir = Path("./WikiTableQuestions/csv/200-csv")
csv_files = sorted([f for f in data_dir.glob("*.csv")])
dfs = []
for csv_file in csv_files:
print(f"processing file: {csv_file}")
try:
df = pd.read_csv(csv_file)
dfs.append(df)
except Exception as e:
print(f"Error parsing {csv_file}: {str(e)}")
tableinfo_dir = "WikiTableQuestions_TableInfo"
get_ipython().system('mkdir {tableinfo_dir}')
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.llms.openai import OpenAI
class TableInfo(BaseModel):
"""Information regarding a structured table."""
table_name: str = Field(
..., description="table name (must be underscores and NO spaces)"
)
table_summary: str = Field(
..., description="short, concise summary/caption of the table"
)
prompt_str = """\
Give me a summary of the table with the following JSON format.
- The table name must be unique to the table and describe it while being concise.
- Do NOT output a generic table name (e.g. table, my_table).
Do NOT make the table name one of the following: {exclude_table_name_list}
Table:
{table_str}
Summary: """
program = LLMTextCompletionProgram.from_defaults(
output_cls=TableInfo,
llm=OpenAI(model="gpt-3.5-turbo"),
prompt_template_str=prompt_str,
)
import json
def _get_tableinfo_with_index(idx: int) -> str:
results_gen = Path(tableinfo_dir).glob(f"{idx}_*")
results_list = list(results_gen)
if len(results_list) == 0:
return None
elif len(results_list) == 1:
path = results_list[0]
return TableInfo.parse_file(path)
else:
raise ValueError(
f"More than one file matching index: {list(results_gen)}"
)
table_names = set()
table_infos = []
for idx, df in enumerate(dfs):
table_info = _get_tableinfo_with_index(idx)
if table_info:
table_infos.append(table_info)
else:
while True:
df_str = df.head(10).to_csv()
table_info = program(
table_str=df_str,
exclude_table_name_list=str(list(table_names)),
)
table_name = table_info.table_name
print(f"Processed table: {table_name}")
if table_name not in table_names:
table_names.add(table_name)
break
else:
print(f"Table name {table_name} already exists, trying again.")
pass
out_file = f"{tableinfo_dir}/{idx}_{table_name}.json"
json.dump(table_info.dict(), open(out_file, "w"))
table_infos.append(table_info)
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
)
import re
def sanitize_column_name(col_name):
return re.sub(r"\W+", "_", col_name)
def create_table_from_dataframe(
df: pd.DataFrame, table_name: str, engine, metadata_obj
):
sanitized_columns = {col: sanitize_column_name(col) for col in df.columns}
df = df.rename(columns=sanitized_columns)
columns = [
Column(col, String if dtype == "object" else Integer)
for col, dtype in zip(df.columns, df.dtypes)
]
table = Table(table_name, metadata_obj, *columns)
metadata_obj.create_all(engine)
with engine.connect() as conn:
for _, row in df.iterrows():
insert_stmt = table.insert().values(**row.to_dict())
conn.execute(insert_stmt)
conn.commit()
engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()
for idx, df in enumerate(dfs):
tableinfo = _get_tableinfo_with_index(idx)
print(f"Creating table: {tableinfo.table_name}")
create_table_from_dataframe(df, tableinfo.table_name, engine, metadata_obj)
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.objects import (
SQLTableNodeMapping,
ObjectIndex,
SQLTableSchema,
)
from llama_index.core import SQLDatabase, VectorStoreIndex
sql_database = SQLDatabase(engine)
table_node_mapping = SQLTableNodeMapping(sql_database)
table_schema_objs = [
SQLTableSchema(table_name=t.table_name, context_str=t.table_summary)
for t in table_infos
] # add a SQLTableSchema for each table
obj_index = ObjectIndex.from_objects(
table_schema_objs,
table_node_mapping,
VectorStoreIndex,
)
obj_retriever = obj_index.as_retriever(similarity_top_k=3)
from llama_index.core.retrievers import SQLRetriever
from typing import List
from llama_index.core.query_pipeline import FnComponent
sql_retriever = SQLRetriever(sql_database)
def get_table_context_str(table_schema_objs: List[SQLTableSchema]):
"""Get table context string."""
context_strs = []
for table_schema_obj in table_schema_objs:
table_info = sql_database.get_single_table_info(
table_schema_obj.table_name
)
if table_schema_obj.context_str:
table_opt_context = " The table description is: "
table_opt_context += table_schema_obj.context_str
table_info += table_opt_context
context_strs.append(table_info)
return "\n\n".join(context_strs)
table_parser_component = FnComponent(fn=get_table_context_str)
from llama_index.core.prompts.default_prompts import DEFAULT_TEXT_TO_SQL_PROMPT
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import FnComponent
from llama_index.core.llms import ChatResponse
def parse_response_to_sql(response: ChatResponse) -> str:
"""Parse response to SQL."""
response = response.message.content
sql_query_start = response.find("SQLQuery:")
if sql_query_start != -1:
response = response[sql_query_start:]
if response.startswith("SQLQuery:"):
response = response[len("SQLQuery:") :]
sql_result_start = response.find("SQLResult:")
if sql_result_start != -1:
response = response[:sql_result_start]
return response.strip().strip("```").strip()
sql_parser_component = FnComponent(fn=parse_response_to_sql)
text2sql_prompt = DEFAULT_TEXT_TO_SQL_PROMPT.partial_format(
dialect=engine.dialect.name
)
print(text2sql_prompt.template)
response_synthesis_prompt_str = (
"Given an input question, synthesize a response from the query results.\n"
"Query: {query_str}\n"
"SQL: {sql_query}\n"
"SQL Response: {context_str}\n"
"Response: "
)
response_synthesis_prompt = PromptTemplate(
response_synthesis_prompt_str,
)
llm = OpenAI(model="gpt-3.5-turbo")
from llama_index.core.query_pipeline import (
QueryPipeline as QP,
Link,
InputComponent,
CustomQueryComponent,
)
qp = QP(
modules={
"input": InputComponent(),
"table_retriever": obj_retriever,
"table_output_parser": table_parser_component,
"text2sql_prompt": text2sql_prompt,
"text2sql_llm": llm,
"sql_output_parser": sql_parser_component,
"sql_retriever": sql_retriever,
"response_synthesis_prompt": response_synthesis_prompt,
"response_synthesis_llm": llm,
},
verbose=True,
)
qp.add_chain(["input", "table_retriever", "table_output_parser"])
qp.add_link("input", "text2sql_prompt", dest_key="query_str")
qp.add_link("table_output_parser", "text2sql_prompt", dest_key="schema")
qp.add_chain(
["text2sql_prompt", "text2sql_llm", "sql_output_parser", "sql_retriever"]
)
qp.add_link(
"sql_output_parser", "response_synthesis_prompt", dest_key="sql_query"
)
qp.add_link(
"sql_retriever", "response_synthesis_prompt", dest_key="context_str"
)
qp.add_link("input", "response_synthesis_prompt", dest_key="query_str")
qp.add_link("response_synthesis_prompt", "response_synthesis_llm")
from pyvis.network import Network
net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(qp.dag)
net.show("text2sql_dag.html")
response = qp.run(
query="What was the year that The Notorious B.I.G was signed to Bad Boy?"
)
print(str(response))
response = qp.run(query="Who won best director in the 1972 academy awards")
print(str(response))
response = qp.run(query="What was the term of Pasquale Preziosa?")
print(str(response))
from llama_index.core import VectorStoreIndex, load_index_from_storage
from sqlalchemy import text
from llama_index.core.schema import TextNode
from llama_index.core import StorageContext
import os
from pathlib import Path
from typing import Dict
def index_all_tables(
sql_database: SQLDatabase, table_index_dir: str = "table_index_dir"
) -> Dict[str, VectorStoreIndex]:
"""Index all tables."""
if not Path(table_index_dir).exists():
os.makedirs(table_index_dir)
vector_index_dict = {}
engine = sql_database.engine
for table_name in sql_database.get_usable_table_names():
print(f"Indexing rows in table: {table_name}")
if not os.path.exists(f"{table_index_dir}/{table_name}"):
with engine.connect() as conn:
cursor = conn.execute(text(f'SELECT * FROM "{table_name}"'))
result = cursor.fetchall()
row_tups = []
for row in result:
row_tups.append(tuple(row))
nodes = [TextNode(text=str(t)) for t in row_tups]
index = VectorStoreIndex(nodes)
index.set_index_id("vector_index")
index.storage_context.persist(f"{table_index_dir}/{table_name}")
else:
storage_context = StorageContext.from_defaults(
persist_dir=f"{table_index_dir}/{table_name}"
)
index = load_index_from_storage(
storage_context, index_id="vector_index"
)
vector_index_dict[table_name] = index
return vector_index_dict
vector_index_dict = index_all_tables(sql_database)
test_retriever = vector_index_dict["Bad_Boy_Artists"].as_retriever(
similarity_top_k=1
)
nodes = test_retriever.retrieve("P. Diddy")
print(nodes[0].get_content())
from llama_index.core.retrievers import SQLRetriever
from typing import List
from llama_index.core.query_pipeline import FnComponent
sql_retriever = SQLRetriever(sql_database)
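# Illustrative use of the raw SQL retriever (assumed continuation): SQLRetriever
# executes the SQL string it receives and returns the rows as nodes. The table
# name reuses "Bad_Boy_Artists" from the indexing step above.
nodes = sql_retriever.retrieve('SELECT * FROM "Bad_Boy_Artists"')
print(nodes[0].get_content())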
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import nest_asyncio
nest_asyncio.apply()
from llama_index.core.evaluation import generate_question_context_pairs
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
node_parser = SentenceSplitter(chunk_size=512)
nodes = node_parser.get_nodes_from_documents(documents)
for idx, node in enumerate(nodes):
node.id_ = f"node_{idx}"
llm = OpenAI(model="gpt-4")
vector_index = VectorStoreIndex(nodes)
retriever = vector_index.as_retriever(similarity_top_k=2)
retrieved_nodes = retriever.retrieve("What did the author do growing up?")
from llama_index.core.response.notebook_utils import display_source_node
for node in retrieved_nodes:
display_source_node(node, source_length=1000)
from llama_index.core.evaluation import (
generate_question_context_pairs,
EmbeddingQAFinetuneDataset,
)
qa_dataset = generate_question_context_pairs(
nodes, llm=llm, num_questions_per_chunk=2
)
queries = qa_dataset.queries.values()
print(list(queries)[2])
qa_dataset.save_json("pg_eval_dataset.json")
qa_dataset = EmbeddingQAFinetuneDataset.from_json("pg_eval_dataset.json")
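# Assumed next step in the standard retrieval-evaluation flow: score the
# retriever against the generated (question, context) pairs.
from llama_index.core.evaluation import RetrieverEvaluator
retriever_evaluator = RetrieverEvaluator.from_metric_names(
    ["mrr", "hit_rate"], retriever=retriever
)
eval_results = await retriever_evaluator.aevaluate_dataset(qa_dataset)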
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-rankgpt-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-packs-infer-retrieve-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import datasets
dataset = datasets.load_dataset("BioDEX/BioDEX-ICSR")
dataset
from llama_index.core import get_tokenizer
import re
from typing import Set, List
tokenizer = get_tokenizer()
sample_size = 5
def get_reactions_row(raw_target: str) -> List[str]:
"""Get reactions from a single row."""
reaction_pattern = re.compile(r"reactions:\s*(.*)")
reaction_match = reaction_pattern.search(raw_target)
if reaction_match:
reactions = reaction_match.group(1).split(",")
reactions = [r.strip().lower() for r in reactions]
else:
reactions = []
return reactions
def get_reactions_set(dataset) -> Set[str]:
"""Get set of all reactions."""
reactions = set()
for data in dataset["train"]:
reactions.update(set(get_reactions_row(data["target"])))
return reactions
def get_samples(dataset, sample_size: int = 5):
"""Get processed sample.
Contains source text and also the reaction label.
Parse reaction text to specifically extract reactions.
"""
samples = []
for idx, data in enumerate(dataset["train"]):
if idx >= sample_size:
break
text = data["fulltext_processed"]
raw_target = data["target"]
reactions = get_reactions_row(raw_target)
samples.append({"text": text, "reactions": reactions})
return samples
from llama_index.packs.infer_retrieve_rerank import InferRetrieveRerankPack
from llama_index.core.llama_pack import download_llama_pack
InferRetrieveRerankPack = download_llama_pack(
"InferRetrieveRerankPack",
"./irr_pack",
)
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-16k")
pred_context = """\
The output predictions should be a list of comma-separated adverse \
drug reactions. \
"""
reranker_top_n = 10
pack = InferRetrieveRerankPack(
get_reactions_set(dataset),
llm=llm,
pred_context=pred_context,
reranker_top_n=reranker_top_n,
verbose=True,
)
samples = get_samples(dataset, sample_size=5)
pred_reactions = pack.run(inputs=[s["text"] for s in samples])
gt_reactions = [s["reactions"] for s in samples]
pred_reactions[2]
gt_reactions[2]
from llama_index.core.retrievers import BaseRetriever
from llama_index.core.llms import LLM
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import QueryPipeline
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.postprocessor.rankgpt_rerank import RankGPTRerank
from llama_index.core.output_parsers import ChainableOutputParser
from typing import List
import random
all_reactions = get_reactions_set(dataset)
random.sample(list(all_reactions), 5)
from llama_index.core.schema import TextNode
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core import VectorStoreIndex
reaction_nodes = [TextNode(text=r) for r in all_reactions]
pipeline = IngestionPipeline(transformations=[OpenAIEmbedding()])
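# Hedged continuation: run the ingestion pipeline to embed the reaction nodes,
# then build a vector index over them for label retrieval.
reaction_nodes = pipeline.run(nodes=reaction_nodes)
index = VectorStoreIndex(reaction_nodes)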
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-chroma')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().system('pip install llama-index')
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core import StorageContext
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from IPython.display import Markdown, display
import chromadb
import os
import getpass
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
import openai
openai.api_key = os.environ["OPENAI_API_KEY"]
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
chroma_client = chromadb.EphemeralClient()
chroma_collection = chroma_client.create_collection("quickstart")
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context, embed_model=embed_model
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
display(Markdown(f"<b>{response}</b>"))
db = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db.get_or_create_collection("quickstart")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
get_ipython().system('pip install llama-index-postprocessor-jinaai-rerank')
get_ipython().system('pip install llama-index-embeddings-jinaai')
get_ipython().system('pip install llama-index')
import os
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
)
from llama_index.embeddings.jinaai import JinaEmbedding
api_key = os.environ["JINA_API_KEY"]
jina_embeddings = JinaEmbedding(api_key=api_key)
import requests
url = "https://niketeam-asset-download.nike.net/catalogs/2024/2024_Nike%20Kids_02_09_24.pdf?cb=09302022"
response = requests.get(url)
with open("Nike_Catalog.pdf", "wb") as f:
f.write(response.content)
reader = SimpleDirectoryReader(input_files=["Nike_Catalog.pdf"])
documents = reader.load_data()
index = VectorStoreIndex.from_documents(
documents=documents, embed_model=jina_embeddings
)
query_engine = index.as_query_engine(similarity_top_k=10)
response = query_engine.query(
"What is the best jersey by Nike in terms of fabric?",
)
print(response.source_nodes[0].text, response.source_nodes[0].score)
print("\n")
print(response.source_nodes[1].text, response.source_nodes[1].score)
import os
from llama_index.postprocessor.jinaai_rerank import JinaRerank
jina_rerank = JinaRerank(api_key=api_key, top_n=2)
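# Sketch (assumed): plug the reranker into the query engine so the top-10 dense
# hits are reranked down to 2 before response synthesis.
query_engine = index.as_query_engine(
    similarity_top_k=10, node_postprocessors=[jina_rerank]
)
response = query_engine.query(
    "What is the best jersey by Nike in terms of fabric?",
)
print(response.source_nodes[0].text, response.source_nodes[0].score)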
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import BaseTool, FunctionTool
def multiply(a: int, b: int) -> int:
"""Multiple two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
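# Illustrative agent setup over the two function tools; the question is hypothetical.
llm = OpenAI(model="gpt-3.5-turbo")
agent = OpenAIAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True)
response = agent.chat("What is (121 * 3) + 42?")
print(str(response))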
get_ipython().run_line_magic('pip', 'install llama-index-llms-rungpt')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install rungpt')
get_ipython().system('rungpt serve decapoda-research/llama-7b-hf --precision fp16 --device_map balanced')
from llama_index.llms.rungpt import RunGptLLM
llm = RunGptLLM()
prompt = "What public transportation might be available in a city?"
response = llm.complete(prompt)
print(response)
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.llms.rungpt import RunGptLLM
messages = [
ChatMessage(
role=MessageRole.USER,
content="Now, I want you to do some math for me.",
),
ChatMessage(
role=MessageRole.ASSISTANT, content="Sure, I would like to help you."
),
ChatMessage(
role=MessageRole.USER,
content="How many points determine a straight line?",
),
]
llm = RunGptLLM()
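# Assumed continuation: send the prepared chat history to the rungpt-served model.
resp = llm.chat(messages)
print(resp)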
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().system('pip install llama-index')
import time
import nest_asyncio
nest_asyncio.apply()
import os
os.environ["OPENAI_API_KEY"] = "[YOUR_API_KEY]"
from llama_index.core import VectorStoreIndex, download_loader
from llama_index.readers.wikipedia import WikipediaReader
loader = WikipediaReader()
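# Minimal usage sketch (page titles are hypothetical): load a few pages and
# build a vector index over them.
documents = loader.load_data(pages=["Berlin", "Rome", "Tokyo"])
index = VectorStoreIndex.from_documents(documents)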
import os
from getpass import getpass
if os.getenv("OPENAI_API_KEY") is None:
os.environ["OPENAI_API_KEY"] = getpass(
"Paste your OpenAI key from:"
" https://platform.openai.com/account/api-keys\n"
)
assert os.getenv("OPENAI_API_KEY", "").startswith(
"sk-"
), "This doesn't look like a valid OpenAI API key"
print("OpenAI API key configured")
get_ipython().run_line_magic('pip', 'install -q html2text llama-index pandas pyarrow tqdm')
get_ipython().run_line_magic('pip', 'install -q llama-index-readers-web')
get_ipython().run_line_magic('pip', 'install -q llama-index-callbacks-openinference')
import hashlib
import json
from pathlib import Path
import os
import textwrap
from typing import List, Union
import llama_index.core
from llama_index.readers.web import SimpleWebPageReader
from llama_index.core import VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.callbacks import CallbackManager
from llama_index.callbacks.openinference import OpenInferenceCallbackHandler
from llama_index.callbacks.openinference.base import (
as_dataframe,
QueryData,
NodeData,
)
from llama_index.core.node_parser import SimpleNodeParser
import pandas as pd
from tqdm import tqdm
documents = SimpleWebPageReader().load_data(
[
"https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt"
]
)
print(documents[0].text)
parser = SentenceSplitter()
nodes = parser.get_nodes_from_documents(documents)
print(nodes[0].text)
callback_handler = OpenInferenceCallbackHandler()
callback_manager = CallbackManager([callback_handler])
llama_index.core.Settings.callback_manager = callback_manager
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
max_characters_per_line = 80
queries = [
"What did Paul Graham do growing up?",
"When and how did Paul Graham's mother die?",
"What, in Paul Graham's opinion, is the most distinctive thing about YC?",
"When and how did Paul Graham meet Jessica Livingston?",
"What is Bel, and when and where was it written?",
]
for query in queries:
response = query_engine.query(query)
print("Query")
print("=====")
print(textwrap.fill(query, max_characters_per_line))
print()
print("Response")
print("========")
print(textwrap.fill(str(response), max_characters_per_line))
print()
query_data_buffer = callback_handler.flush_query_data_buffer()
query_dataframe = as_dataframe(query_data_buffer)
query_dataframe
class ParquetCallback:
def __init__(
self, data_path: Union[str, Path], max_buffer_length: int = 1000
):
self._data_path = Path(data_path)
self._data_path.mkdir(parents=True, exist_ok=False)
self._max_buffer_length = max_buffer_length
self._batch_index = 0
def __call__(
self,
query_data_buffer: List[QueryData],
node_data_buffer: List[NodeData],
) -> None:
if len(query_data_buffer) >= self._max_buffer_length:
            query_dataframe = as_dataframe(query_data_buffer)
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool
def multiply(a: int, b: int) -> int:
"""Multiply two integers and returns the result integer"""
return a * b
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
"""Add two integers and returns the result integer"""
return a + b
add_tool = FunctionTool.from_defaults(fn=add)
llm = OpenAI(model="gpt-3.5-turbo-instruct")
agent = ReActAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True)
response = agent.chat("What is 20+(2*4)? Calculate step by step ")
response_gen = agent.stream_chat("What is 20+2*4? Calculate step by step")
response_gen.print_response_stream()
llm = OpenAI(model="gpt-4")
agent = ReActAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True)
response = agent.chat("What is 2+2*4")
print(response)
llm = OpenAI(model="gpt-4")
agent = ReActAgent.from_tools([multiply_tool, add_tool], llm=llm, verbose=True)
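# Optional inspection step (assumed): print the prompts that drive the ReAct loop.
prompt_dict = agent.get_prompts()
for k, v in prompt_dict.items():
    print(f"Prompt: {k}\n\nValue: {v.template}")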
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
StorageContext,
)
from llama_index.core import SummaryIndex
from llama_index.core.response.notebook_utils import display_response
from llama_index.llms.openai import OpenAI
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.core import Document
from llama_index.readers.file import PyMuPDFReader
loader = PyMuPDFReader()
docs0 = loader.load(file_path=Path("./data/llama2.pdf"))
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
llm = OpenAI(model="gpt-4")
from llama_index.core.node_parser import SentenceSplitter
chunk_sizes = [128, 256, 512, 1024]
nodes_list = []
vector_indices = []
for chunk_size in chunk_sizes:
print(f"Chunk Size: {chunk_size}")
splitter = SentenceSplitter(chunk_size=chunk_size)
nodes = splitter.get_nodes_from_documents(docs)
for node in nodes:
node.metadata["chunk_size"] = chunk_size
node.excluded_embed_metadata_keys = ["chunk_size"]
node.excluded_llm_metadata_keys = ["chunk_size"]
nodes_list.append(nodes)
vector_index = VectorStoreIndex(nodes)
vector_indices.append(vector_index)
from llama_index.core.tools import RetrieverTool
from llama_index.core.schema import IndexNode
retriever_dict = {}
retriever_nodes = []
for chunk_size, vector_index in zip(chunk_sizes, vector_indices):
node_id = f"chunk_{chunk_size}"
node = IndexNode(
text=(
"Retrieves relevant context from the Llama 2 paper (chunk size"
f" {chunk_size})"
),
index_id=node_id,
)
retriever_nodes.append(node)
retriever_dict[node_id] = vector_index.as_retriever()
from llama_index.core.selectors import PydanticMultiSelector
from llama_index.core.retrievers import RouterRetriever
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core import SummaryIndex
summary_index = SummaryIndex(retriever_nodes)
retriever = RecursiveRetriever(
root_id="root",
retriever_dict={"root": summary_index.as_retriever(), **retriever_dict},
)
nodes = await retriever.aretrieve(
"Tell me about the main aspects of safety fine-tuning"
)
print(f"Number of nodes: {len(nodes)}")
for node in nodes:
print(node.node.metadata["chunk_size"])
print(node.node.get_text())
from llama_index.core.postprocessor import LLMRerank, SentenceTransformerRerank
from llama_index.postprocessor.cohere_rerank import CohereRerank
reranker = CohereRerank(top_n=10)
from llama_index.core.query_engine import RetrieverQueryEngine
query_engine = RetrieverQueryEngine(retriever, node_postprocessors=[reranker])
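# Assumed follow-up: run the reranked, multi-chunk-size retrieval end to end.
response = query_engine.query(
    "Tell me about the main aspects of safety fine-tuning"
)
print(str(response))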
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-web')
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-tools-metaphor')
get_ipython().system('wget "https://images.openai.com/blob/a2e49de2-ba5b-4869-9c2d-db3b4b5dcc19/new-models-and-developer-products-announced-at-devday.jpg?width=2000" -O other_images/openai/dev_day.png')
get_ipython().system('wget "https://drive.google.com/uc\\?id\\=1B4f5ZSIKN0zTTPPRlZ915Ceb3_uF9Zlq\\&export\\=download" -O other_images/adidas.png')
from llama_index.readers.web import SimpleWebPageReader
url = "https://openai.com/blog/new-models-and-developer-products-announced-at-devday"
reader = SimpleWebPageReader(html_to_text=True)
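# Usage sketch: fetch the announcement page as a text document.
documents = reader.load_data(urls=[url])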
get_ipython().run_line_magic('pip', 'install llama-index-readers-github')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-weaviate')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import nest_asyncio
nest_asyncio.apply()
import os
os.environ["GITHUB_TOKEN"] = ""
import os
from llama_index.readers.github import GitHubRepositoryIssuesReader, GitHubIssuesClient
github_client = GitHubIssuesClient()
loader = GitHubRepositoryIssuesReader(
github_client,
owner="run-llama",
repo="llama_index",
verbose=True,
)
orig_docs = loader.load_data()
limit = 100
docs = []
for idx, doc in enumerate(orig_docs):
doc.metadata["index_id"] = doc.id_
if idx >= limit:
break
docs.append(doc)
from copy import deepcopy
import asyncio
from tqdm.asyncio import tqdm_asyncio
from llama_index.core.indices import SummaryIndex
from llama_index.core import Document, ServiceContext
from llama_index.llms.openai import OpenAI
from llama_index.core.async_utils import run_jobs
async def aprocess_doc(doc, include_summary: bool = True):
"""Process doc."""
print(f"Processing {doc.id_}")
metadata = doc.metadata
date_tokens = metadata["created_at"].split("T")[0].split("-")
year = int(date_tokens[0])
month = int(date_tokens[1])
day = int(date_tokens[2])
assignee = "" if "assignee" not in doc.metadata else doc.metadata["assignee"]
size = ""
if len(doc.metadata["labels"]) > 0:
size_arr = [l for l in doc.metadata["labels"] if "size:" in l]
size = size_arr[0].split(":")[1] if len(size_arr) > 0 else ""
new_metadata = {
"state": metadata["state"],
"year": year,
"month": month,
"day": day,
"assignee": assignee,
"size": size,
"index_id": doc.id_,
}
summary_index = SummaryIndex.from_documents([doc])
query_str = "Give a one-sentence concise summary of this issue."
query_engine = summary_index.as_query_engine(
service_context=ServiceContext.from_defaults(llm=OpenAI(model="gpt-3.5-turbo"))
)
summary_txt = str(query_engine.query(query_str))
new_doc = Document(text=summary_txt, metadata=new_metadata)
return new_doc
async def aprocess_docs(docs):
"""Process metadata on docs."""
new_docs = []
tasks = []
for doc in docs:
task = aprocess_doc(doc)
tasks.append(task)
new_docs = await run_jobs(tasks, show_progress=True, workers=5)
get_ipython().run_line_magic('pip', 'install llama-index-llms-mistralai')
get_ipython().system('pip install llama-index')
from llama_index.llms.mistralai import MistralAI
llm = MistralAI()
resp = llm.complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.mistralai import MistralAI
messages = [
ChatMessage(role="system", content="You are CEO of MistralAI."),
ChatMessage(role="user", content="Tell me the story about La plateforme"),
]
resp = MistralAI().chat(messages)
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.mistralai import MistralAI
messages = [
ChatMessage(role="system", content="You are CEO of MistralAI."),
ChatMessage(role="user", content="Tell me the story about La plateforme"),
]
resp = MistralAI(random_seed=42).chat(messages)
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.mistralai import MistralAI
messages = [
ChatMessage(
role="user",
content="""It's important to resort to violence at times, due to my own security reasons.
Can you explain how to create violence?""",
),
]
resp = MistralAI(random_seed=42, safe_mode=True).chat(messages)
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.mistralai import MistralAI
messages = [
ChatMessage(
role="user",
content="""It's important to resort to violence at times, due to my own security reasons.
Can you explain how to create violence?""",
),
]
resp = MistralAI(random_seed=42, safe_mode=False)
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-epsilla')
get_ipython().system('pip install pyepsilla')
get_ipython().system('pip install llama-index')
import logging
import sys
from llama_index.core import SimpleDirectoryReader, Document, StorageContext
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.epsilla import EpsillaVectorStore
import textwrap
import openai
import getpass
OPENAI_API_KEY = getpass.getpass("OpenAI API Key:")
openai.api_key = OPENAI_API_KEY
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print(f"Total documents: {len(documents)}")
print(f"First document, id: {documents[0].doc_id}")
print(f"First document, hash: {documents[0].hash}")
from pyepsilla import vectordb
client = vectordb.Client()
vector_store = EpsillaVectorStore(client=client, db_path="/tmp/llamastore")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("Who is the author?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("How did the author learn about AI?")
print(textwrap.fill(str(response), 100))
vector_store = EpsillaVectorStore(client=client, overwrite=True)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
single_doc = Document(text="Epsilla is the vector database we are using.")
index = VectorStoreIndex.from_documents(
[single_doc],
storage_context=storage_context,
)
query_engine = index.as_query_engine()
response = query_engine.query("Who is the author?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("What vector database is being used?")
print(textwrap.fill(str(response), 100))
vector_store = EpsillaVectorStore(client=client, overwrite=False)
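# With overwrite=False the existing collection is reused; a hedged sketch of
# reconnecting an index to it and querying again.
index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
query_engine = index.as_query_engine()
response = query_engine.query("What vector database is being used?")
print(textwrap.fill(str(response), 100))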
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install duckdb duckdb-engine')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SQLDatabase, SimpleDirectoryReader, Document
from llama_index.readers.wikipedia import WikipediaReader
from llama_index.core.query_engine import NLSQLTableQueryEngine
from llama_index.core.indices.struct_store import SQLTableRetrieverQueryEngine
from IPython.display import Markdown, display
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("duckdb:///:memory:")
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
metadata_obj.tables.keys()
from sqlalchemy import insert
rows = [
{"city_name": "Toronto", "population": 2930000, "country": "Canada"},
{"city_name": "Tokyo", "population": 13960000, "country": "Japan"},
{
"city_name": "Chicago",
"population": 2679000,
"country": "United States",
},
{"city_name": "Seoul", "population": 9776000, "country": "South Korea"},
]
for row in rows:
stmt = insert(city_stats_table).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
with engine.connect() as connection:
cursor = connection.exec_driver_sql("SELECT * FROM city_stats")
print(cursor.fetchall())
from llama_index.core import SQLDatabase
sql_database = SQLDatabase(engine, include_tables=["city_stats"])
query_engine = NLSQLTableQueryEngine(sql_database)
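# Illustrative natural-language query against the DuckDB-backed table (assumed
# continuation of the snippet).
response = query_engine.query("Which city has the highest population?")
display(Markdown(f"<b>{response}</b>"))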
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core.postprocessor import (
PIINodePostprocessor,
NERPIINodePostprocessor,
)
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.core import Document, VectorStoreIndex
from llama_index.core.schema import TextNode
text = """
Hello Paulo Santos. The latest statement for your credit card account \
1111-0000-1111-0000 was mailed to 123 Any Street, Seattle, WA 98109.
"""
node = TextNode(text=text)
processor = NERPIINodePostprocessor()
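# Assumed usage: run the NER-based PII masker over the node and inspect the
# scrubbed text.
from llama_index.core.schema import NodeWithScore
new_nodes = processor.postprocess_nodes([NodeWithScore(node=node)])
print(new_nodes[0].node.get_text())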
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
from pydantic import BaseModel
from unstructured.partition.html import partition_html
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm')
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
docs_2021 = reader.load_data(Path("tesla_2021_10k.htm"))
docs_2020 = reader.load_data(Path("tesla_2020_10k.htm"))
from llama_index.core.node_parser import UnstructuredElementNodeParser
node_parser = UnstructuredElementNodeParser()
import os
import pickle
if not os.path.exists("2021_nodes.pkl"):
raw_nodes_2021 = node_parser.get_nodes_from_documents(docs_2021)
pickle.dump(raw_nodes_2021, open("2021_nodes.pkl", "wb"))
else:
raw_nodes_2021 = pickle.load(open("2021_nodes.pkl", "rb"))
base_nodes_2021, node_mappings_2021 = node_parser.get_base_nodes_and_mappings(
raw_nodes_2021
)
from llama_index.core.schema import IndexNode
example_index_node = [b for b in base_nodes_2021 if isinstance(b, IndexNode)][
    20
]
print(
f"\n--------\n{example_index_node.get_content(metadata_mode='all')}\n--------\n"
)
print(f"\n--------\nIndex ID: {example_index_node.index_id}\n--------\n")
print(
f"\n--------\n{node_mappings_2021[example_index_node.index_id].get_content()}\n--------\n"
)
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import VectorStoreIndex
vector_index = VectorStoreIndex(base_nodes_2021)
vector_retriever = vector_index.as_retriever(similarity_top_k=1)
vector_query_engine = vector_index.as_query_engine(similarity_top_k=1)
from llama_index.core.retrievers import RecursiveRetriever
recursive_retriever = RecursiveRetriever(
"vector",
retriever_dict={"vector": vector_retriever},
node_dict=node_mappings_2021,
verbose=True,
)
query_engine = RetrieverQueryEngine.from_args(recursive_retriever)
response = query_engine.query("What was the revenue in 2020?")
print(str(response))
response = vector_query_engine.query("What was the revenue in 2020?")
print(str(response))
response = query_engine.query("What were the total cash flows in 2021?")
print(str(response))
response = vector_query_engine.query("What were the total cash flows in 2021?")
print(str(response))
response = query_engine.query("What are the risk factors for Tesla?")
print(str(response))
response = vector_query_engine.query("What are the risk factors for Tesla?")
print(str(response))
import pickle
import os
def create_recursive_retriever_over_doc(docs, nodes_save_path=None):
"""Big function to go from document path -> recursive retriever."""
    node_parser = UnstructuredElementNodeParser()
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('env', 'OPENAI_API_KEY=YOUR_OPENAI_KEY')
get_ipython().system('pip install llama-index pypdf')
get_ipython().system("mkdir -p 'data/'")
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from pathlib import Path
from llama_index.readers.file import PDFReader
from llama_index.core.response.notebook_utils import display_source_node
from llama_index.core.retrievers import RecursiveRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
import json
loader = PDFReader()
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))
from llama_index.core import Document
doc_text = "\n\n".join([d.get_content() for d in docs0])
docs = [Document(text=doc_text)]
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import IndexNode
node_parser = SentenceSplitter(chunk_size=1024)
base_nodes = node_parser.get_nodes_from_documents(docs)
for idx, node in enumerate(base_nodes):
node.id_ = f"node-{idx}"
from llama_index.core.embeddings import resolve_embed_model
embed_model = resolve_embed_model("local:BAAI/bge-small-en")
llm = OpenAI(model="gpt-3.5-turbo")
base_index = VectorStoreIndex(base_nodes, embed_model=embed_model)
base_retriever = base_index.as_retriever(similarity_top_k=2)
retrievals = base_retriever.retrieve(
"Can you tell me about the key concepts for safety finetuning"
)
for n in retrievals:
display_source_node(n, source_length=1500)
query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm)
response = query_engine_base.query(
"Can you tell me about the key concepts for safety finetuning"
)
print(str(response))
sub_chunk_sizes = [128, 256, 512]
sub_node_parsers = [
SentenceSplitter(chunk_size=c, chunk_overlap=20) for c in sub_chunk_sizes
]
all_nodes = []
for base_node in base_nodes:
for n in sub_node_parsers:
sub_nodes = n.get_nodes_from_documents([base_node])
sub_inodes = [
IndexNode.from_text_node(sn, base_node.node_id) for sn in sub_nodes
]
all_nodes.extend(sub_inodes)
original_node = IndexNode.from_text_node(base_node, base_node.node_id)
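# Assumed continuation of the small-to-big setup: keep the original chunk
# alongside its sub-chunk index nodes, then index everything with the local
# embedding model.
all_nodes.append(original_node)
all_nodes_dict = {n.node_id: n for n in all_nodes}
vector_index_chunk = VectorStoreIndex(all_nodes, embed_model=embed_model)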
import openai
openai.api_key = "sk-you-key"
from llama_index.agent import OpenAIAgent
from llama_index.llms import OpenAI
from llama_index.tools.zapier.base import ZapierToolSpec
zapier_spec = ZapierToolSpec(api_key="sk-ak-your-key")
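# Hedged sketch: expose the Zapier NLA actions to an OpenAI agent.
agent = OpenAIAgent.from_tools(zapier_spec.to_tool_list(), verbose=True)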
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-colbert')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-vectara')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google')
get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google')
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"')
get_ipython().run_line_magic('pip', 'install torch sentence-transformers')
get_ipython().run_line_magic('pip', 'install google-auth-oauthlib')
from google.oauth2 import service_account
from llama_index.indices.managed.google import GoogleIndex
from llama_index.vector_stores.google import set_google_config
credentials = service_account.Credentials.from_service_account_file(
"service_account_key.json",
scopes=[
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
get_ipython().run_line_magic('pip', 'install llama-index-readers-github')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-program-openai')
import os
os.environ["GITHUB_TOKEN"] = "<your github token>"
import os
from llama_index.readers.github import (
GitHubRepositoryIssuesReader,
GitHubIssuesClient,
)
github_client = GitHubIssuesClient()
get_ipython().system('pip install llama-index-llms-dashscope')
get_ipython().run_line_magic('env', 'DASHSCOPE_API_KEY=YOUR_DASHSCOPE_API_KEY')
import os
os.environ["DASHSCOPE_API_KEY"] = "YOUR_DASHSCOPE_API_KEY"
from llama_index.llms.dashscope import DashScope, DashScopeGenerationModels
dashscope_llm = DashScope(model_name=DashScopeGenerationModels.QWEN_MAX)
resp = dashscope_llm.complete("How to make cake?")
print(resp)
responses = dashscope_llm.stream_complete("How to make cake?")
for response in responses:
print(response.delta, end="")
from llama_index.core.base.llms.types import MessageRole, ChatMessage
messages = [
ChatMessage(
role=MessageRole.SYSTEM, content="You are a helpful assistant."
),
ChatMessage(role=MessageRole.USER, content="How to make cake?"),
]
resp = dashscope_llm.chat(messages)
print(resp)
responses = dashscope_llm.stream_chat(messages)
for response in responses:
print(response.delta, end="")
messages = [
ChatMessage(
role=MessageRole.SYSTEM, content="You are a helpful assistant."
),
    ChatMessage(role=MessageRole.USER, content="How to make cake?"),
]
get_ipython().run_line_magic('pip', 'install llama-index-llms-konko')
get_ipython().system('pip install llama-index')
import os
os.environ["KONKO_API_KEY"] = "<your-api-key>"
from llama_index.llms.konko import Konko
from llama_index.core.llms import ChatMessage
llm = Konko(model="meta-llama/llama-2-13b-chat")
messages = ChatMessage(role="user", content="Explain Big Bang Theory briefly")
resp = llm.chat([messages])
print(resp)
import os
os.environ["OPENAI_API_KEY"] = "<your-api-key>"
llm = Konko(model="gpt-3.5-turbo")
message = ChatMessage(role="user", content="Explain Big Bang Theory briefly")
resp = llm.chat([message])
print(resp)
message = ChatMessage(role="user", content="Tell me a story in 250 words")
resp = llm.stream_chat([message], max_tokens=1000)
for r in resp:
print(r.delta, end="")
llm = Konko(model="numbersstation/nsql-llama-2-7b", max_tokens=100)
text = """CREATE TABLE stadium (
stadium_id number,
location text,
name text,
capacity number,
highest number,
lowest number,
average number
)
CREATE TABLE singer (
singer_id number,
name text,
country text,
song_name text,
song_release_year text,
age number,
is_male others
)
CREATE TABLE concert (
concert_id number,
concert_name text,
theme text,
stadium_id text,
year text
)
CREATE TABLE singer_in_concert (
concert_id number,
singer_id text
)
-- Using valid SQLite, answer the following questions for the tables provided above.
-- What is the maximum capacity of stadiums ?
SELECT"""
response = llm.complete(text)
print(response)
llm = Konko(model="phind/phind-codellama-34b-v2", max_tokens=100)
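# Illustrative completion with the code model; the prompt is hypothetical.
response = llm.complete("def fibonacci(n: int) -> int:")
print(response)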
get_ipython().system('pip install llama-index')
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.indices.query.query_transform import HyDEQueryTransform
from llama_index.core.query_engine import TransformQueryEngine
from IPython.display import Markdown, display
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
index = VectorStoreIndex.from_documents(documents)
query_str = "what did paul graham do after going to RISD"
query_engine = index.as_query_engine()
response = query_engine.query(query_str)
display(Markdown(f"<b>{response}</b>"))
hyde = HyDEQueryTransform(include_original=True)
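# Assumed next step in the HyDE workflow: wrap the base engine with the
# transform and re-run the same query.
hyde_query_engine = TransformQueryEngine(query_engine, hyde)
response = hyde_query_engine.query(query_str)
display(Markdown(f"<b>{response}</b>"))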
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-docarray')
get_ipython().system('pip install llama-index')
import os
import sys
import logging
import textwrap
import warnings
warnings.filterwarnings("ignore")
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from llama_index.core import (
GPTVectorStoreIndex,
SimpleDirectoryReader,
Document,
)
from llama_index.vector_stores.docarray import DocArrayHnswVectorStore
from IPython.display import Markdown, display
import os
os.environ["OPENAI_API_KEY"] = "<your openai key>"
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()
print(
"Document ID:",
documents[0].doc_id,
"Document Hash:",
documents[0].doc_hash,
)
from llama_index.core import StorageContext
vector_store = DocArrayHnswVectorStore(work_dir="hnsw_index")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = GPTVectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(textwrap.fill(str(response), 100))
response = query_engine.query("What was a hard moment for the author?")
print(textwrap.fill(str(response), 100))
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text="The Shawshank Redemption",
metadata={
"author": "Stephen King",
"theme": "Friendship",
},
),
TextNode(
text="The Godfather",
metadata={
"director": "Francis Ford Coppola",
"theme": "Mafia",
},
),
TextNode(
text="Inception",
metadata={
"director": "Christopher Nolan",
},
),
]
from llama_index.core import StorageContext
vector_store = DocArrayHnswVectorStore(work_dir="hnsw_filters")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = GPTVectorStoreIndex(nodes, storage_context=storage_context)
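# Hedged sketch of metadata filtering over the HNSW store; the filter key/value
# mirror the nodes defined above.
from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters
filters = MetadataFilters(filters=[ExactMatchFilter(key="theme", value="Mafia")])
retriever = index.as_retriever(filters=filters)
print(retriever.retrieve("What is inception about?"))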
from llama_index.core import SQLDatabase
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("sqlite:///chinook.db")
sql_database = SQLDatabase(engine)
from llama_index.core.query_pipeline import QueryPipeline
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip')
get_ipython().system('unzip ./chinook.zip')
from llama_index.core.settings import Settings
from llama_index.core.callbacks import CallbackManager
callback_manager = CallbackManager()
Settings.callback_manager = callback_manager
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.query_engine import NLSQLTableQueryEngine
from llama_index.core.tools import QueryEngineTool
sql_query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["albums", "tracks", "artists"],
verbose=True,
)
sql_tool = QueryEngineTool.from_defaults(
query_engine=sql_query_engine,
name="sql_tool",
description=(
"Useful for translating a natural language query into a SQL query"
),
)
from llama_index.core.query_pipeline import QueryPipeline as QP
qp = QP(verbose=True)
get_ipython().run_line_magic('pip', 'install llama-index-finetuning')
get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
load_index_from_storage,
)
from llama_index.llms.openai import OpenAI
from llama_index.core.tools import QueryEngineTool, ToolMetadata
llm_35 = OpenAI(model="gpt-3.5-turbo-0613", temperature=0.3)
llm_4 = OpenAI(model="gpt-4-0613", temperature=0.3)
try:
storage_context = StorageContext.from_defaults(
persist_dir="./storage/march"
)
march_index = load_index_from_storage(storage_context)
storage_context = StorageContext.from_defaults(
persist_dir="./storage/june"
)
june_index = load_index_from_storage(storage_context)
storage_context = StorageContext.from_defaults(
persist_dir="./storage/sept"
)
sept_index = load_index_from_storage(storage_context)
index_loaded = True
except Exception:
index_loaded = False
if not index_loaded:
march_docs = SimpleDirectoryReader(
input_files=["../../data/10q/uber_10q_march_2022.pdf"]
).load_data()
june_docs = SimpleDirectoryReader(
input_files=["../../data/10q/uber_10q_june_2022.pdf"]
).load_data()
sept_docs = SimpleDirectoryReader(
input_files=["../../data/10q/uber_10q_sept_2022.pdf"]
).load_data()
march_index = VectorStoreIndex.from_documents(
march_docs,
)
june_index = VectorStoreIndex.from_documents(
june_docs,
)
sept_index = VectorStoreIndex.from_documents(
sept_docs,
)
march_index.storage_context.persist(persist_dir="./storage/march")
june_index.storage_context.persist(persist_dir="./storage/june")
sept_index.storage_context.persist(persist_dir="./storage/sept")
march_engine = march_index.as_query_engine(similarity_top_k=3, llm=llm_35)
june_engine = june_index.as_query_engine(similarity_top_k=3, llm=llm_35)
sept_engine = sept_index.as_query_engine(similarity_top_k=3, llm=llm_35)
from llama_index.core.tools import QueryEngineTool
query_tool_sept = QueryEngineTool.from_defaults(
query_engine=sept_engine,
name="sept_2022",
description=(
f"Provides information about Uber quarterly financials ending"
f" September 2022"
),
)
query_tool_june = QueryEngineTool.from_defaults(
query_engine=june_engine,
name="june_2022",
description=(
f"Provides information about Uber quarterly financials ending June"
f" 2022"
),
)
query_tool_march = QueryEngineTool.from_defaults(
query_engine=march_engine,
name="march_2022",
description=(
f"Provides information about Uber quarterly financials ending March"
f" 2022"
),
)
query_engine_tools = [query_tool_march, query_tool_june, query_tool_sept]
from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-0613")
base_agent = ReActAgent.from_tools(query_engine_tools, llm=llm, verbose=True)
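# Illustrative usage (an assumption, not from the source): the ReAct agent
# should route this question to the sept_2022 tool based on its description.
# response = base_agent.chat(
#     "Analyze Uber revenue growth in the quarter ending September 2022."
# )
# print(str(response))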
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip')
get_ipython().system('unzip data.zip')
import pandas as pd
from pathlib import Path
data_dir = Path("./WikiTableQuestions/csv/200-csv")
csv_files = sorted([f for f in data_dir.glob("*.csv")])
dfs = []
for csv_file in csv_files:
print(f"processing file: {csv_file}")
try:
df = pd.read_csv(csv_file)
dfs.append(df)
except Exception as e:
print(f"Error parsing {csv_file}: {str(e)}")
tableinfo_dir = "WikiTableQuestions_TableInfo"
get_ipython().system('mkdir {tableinfo_dir}')
from llama_index.core.program import LLMTextCompletionProgram
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.llms.openai import OpenAI
class TableInfo(BaseModel):
"""Information regarding a structured table."""
table_name: str = Field(
..., description="table name (must be underscores and NO spaces)"
)
table_summary: str = Field(
..., description="short, concise summary/caption of the table"
)
prompt_str = """\
Give me a summary of the table with the following JSON format.
- The table name must be unique to the table and describe it while being concise.
- Do NOT output a generic table name (e.g. table, my_table).
Do NOT make the table name one of the following: {exclude_table_name_list}
Table:
{table_str}
Summary: """
program = LLMTextCompletionProgram.from_defaults(
output_cls=TableInfo,
llm=OpenAI(model="gpt-3.5-turbo"),
prompt_template_str=prompt_str,
)
import json
from typing import Optional


def _get_tableinfo_with_index(idx: int) -> Optional[TableInfo]:
    results_gen = Path(tableinfo_dir).glob(f"{idx}_*")
    results_list = list(results_gen)
    if len(results_list) == 0:
        return None
    elif len(results_list) == 1:
        path = results_list[0]
        return TableInfo.parse_file(path)
    else:
        raise ValueError(
            f"More than one file matching index: {results_list}"
        )
table_names = set()
table_infos = []
for idx, df in enumerate(dfs):
table_info = _get_tableinfo_with_index(idx)
if table_info:
table_infos.append(table_info)
else:
while True:
df_str = df.head(10).to_csv()
table_info = program(
table_str=df_str,
exclude_table_name_list=str(list(table_names)),
)
table_name = table_info.table_name
print(f"Processed table: {table_name}")
if table_name not in table_names:
table_names.add(table_name)
break
else:
print(f"Table name {table_name} already exists, trying again.")
        out_file = f"{tableinfo_dir}/{idx}_{table_name}.json"
        with open(out_file, "w") as f:
            json.dump(table_info.dict(), f)
        table_infos.append(table_info)
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
)
import re
def sanitize_column_name(col_name):
return re.sub(r"\W+", "_", col_name)
def create_table_from_dataframe(
df: pd.DataFrame, table_name: str, engine, metadata_obj
):
sanitized_columns = {col: sanitize_column_name(col) for col in df.columns}
df = df.rename(columns=sanitized_columns)
columns = [
Column(col, String if dtype == "object" else Integer)
for col, dtype in zip(df.columns, df.dtypes)
]
table = Table(table_name, metadata_obj, *columns)
metadata_obj.create_all(engine)
with engine.connect() as conn:
for _, row in df.iterrows():
insert_stmt = table.insert().values(**row.to_dict())
conn.execute(insert_stmt)
conn.commit()
engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()
for idx, df in enumerate(dfs):
tableinfo = _get_tableinfo_with_index(idx)
print(f"Creating table: {tableinfo.table_name}")
create_table_from_dataframe(df, tableinfo.table_name, engine, metadata_obj)
import phoenix as px
import llama_index.core
px.launch_app()
llama_index.core.set_global_handler("arize_phoenix")
from llama_index.core.objects import (
SQLTableNodeMapping,
ObjectIndex,
SQLTableSchema,
)
from llama_index.core import SQLDatabase, VectorStoreIndex
sql_database = SQLDatabase(engine)
table_node_mapping = SQLTableNodeMapping(sql_database)
table_schema_objs = [
    SQLTableSchema(table_name=t.table_name, context_str=t.table_summary)
    for t in table_infos
]
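# Hedged continuation sketch: index the schema objects so relevant tables can
# be retrieved per query. ObjectIndex.from_objects and as_retriever are real
# APIs; similarity_top_k=3 is an illustrative choice, not from the source.
obj_index = ObjectIndex.from_objects(
    table_schema_objs,
    table_node_mapping,
    VectorStoreIndex,
)
obj_retriever = obj_index.as_retriever(similarity_top_k=3)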
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().system('pip install llama-index')
from llama_index.core.node_parser import SimpleFileNodeParser
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
html_file = reader.load_data(Path("./stack-overflow.html"))
md_file = reader.load_data(Path("./README.md"))
print(html_file[0].metadata)
print(html_file[0])
print("----")
print(md_file[0].metadata)
print(md_file[0])
parser = SimpleFileNodeParser()
md_nodes = parser.get_nodes_from_documents(md_file)
html_nodes = parser.get_nodes_from_documents(html_file)
print(md_nodes[0].metadata)
print(md_nodes[0].text)
print(md_nodes[1].metadata)
print(md_nodes[1].text)
print("----")
print(html_nodes[0].metadata)
print(html_nodes[0].text)
from llama_index.core.node_parser import SentenceSplitter
splitting_parser = SentenceSplitter(chunk_size=200, chunk_overlap=0)
html_chunked_nodes = splitting_parser(html_nodes)
md_chunked_nodes = splitting_parser(md_nodes)
print(f"\n\nHTML parsed nodes: {len(html_nodes)}")
print(html_nodes[0].text)
print(f"\n\nHTML chunked nodes: {len(html_chunked_nodes)}")
print(html_chunked_nodes[0].text)
print(f"\n\nMD parsed nodes: {len(md_nodes)}")
print(md_nodes[0].text)
print(f"\n\nMD chunked nodes: {len(md_chunked_nodes)}")
print(md_chunked_nodes[0].text)
from llama_index.core.ingestion import IngestionPipeline
pipeline = IngestionPipeline(
documents=reader.load_data(Path("./README.md")),
    transformations=[
        SimpleFileNodeParser(),
    ],
)
import os
os.environ["OPENAI_API_KEY"] = "YOUR OPENAI API KEY"
from llama_index.llms.openai import OpenAI
llm = OpenAI("gpt-4")
from llama_index.core.llama_pack import download_llama_pack
SelfDiscoverPack = download_llama_pack("SelfDiscoverPack", "./self_discover_pack")
self_discover_pack = SelfDiscoverPack(verbose=True, llm=llm)
from llama_index.packs.self_discover import SelfDiscoverPack
self_discover_pack = SelfDiscoverPack(verbose=True, llm=llm)
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-rankgpt-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-packs-infer-retrieve-rerank')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import datasets
dataset = datasets.load_dataset("BioDEX/BioDEX-ICSR")
dataset
from llama_index.core import get_tokenizer
import re
from typing import Set, List
tokenizer = get_tokenizer()
sample_size = 5
def get_reactions_row(raw_target: str) -> List[str]:
"""Get reactions from a single row."""
reaction_pattern = re.compile(r"reactions:\s*(.*)")
reaction_match = reaction_pattern.search(raw_target)
if reaction_match:
reactions = reaction_match.group(1).split(",")
reactions = [r.strip().lower() for r in reactions]
else:
reactions = []
return reactions
def get_reactions_set(dataset) -> Set[str]:
"""Get set of all reactions."""
reactions = set()
for data in dataset["train"]:
reactions.update(set(get_reactions_row(data["target"])))
return reactions
def get_samples(dataset, sample_size: int = 5):
"""Get processed sample.
Contains source text and also the reaction label.
Parse reaction text to specifically extract reactions.
"""
samples = []
for idx, data in enumerate(dataset["train"]):
if idx >= sample_size:
break
text = data["fulltext_processed"]
raw_target = data["target"]
reactions = get_reactions_row(raw_target)
samples.append({"text": text, "reactions": reactions})
return samples
from llama_index.packs.infer_retrieve_rerank import InferRetrieveRerankPack
from llama_index.core.llama_pack import download_llama_pack
InferRetrieveRerankPack = download_llama_pack(
"InferRetrieveRerankPack",
"./irr_pack",
)
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo-16k")
pred_context = """\
The output predictions should be a list of comma-separated adverse \
drug reactions. \
"""
reranker_top_n = 10
pack = InferRetrieveRerankPack(
get_reactions_set(dataset),
llm=llm,
pred_context=pred_context,
reranker_top_n=reranker_top_n,
verbose=True,
)
samples = get_samples(dataset, sample_size=5)
pred_reactions = pack.run(inputs=[s["text"] for s in samples])
gt_reactions = [s["reactions"] for s in samples]
pred_reactions[2]
gt_reactions[2]
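# Hedged helper (an addition, not part of the source): rough set overlap
# between one predicted reaction list and the corresponding ground truth.
def reaction_overlap(pred, gt) -> float:
    pred_set = {p.strip().lower() for p in pred}
    gt_set = {g.strip().lower() for g in gt}
    return len(pred_set & gt_set) / max(len(gt_set), 1)

# e.g. reaction_overlap(pred_reactions[2], gt_reactions[2])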
from llama_index.core.retrievers import BaseRetriever
from llama_index.core.llms import LLM
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import QueryPipeline
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.postprocessor.rankgpt_rerank import RankGPTRerank
from llama_index.core.output_parsers import ChainableOutputParser
from typing import List
import random
all_reactions = get_reactions_set(dataset)
random.sample(sorted(all_reactions), 5)  # sample() needs a sequence, not a set
from llama_index.core.schema import TextNode
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core import VectorStoreIndex
reaction_nodes = [TextNode(text=r) for r in all_reactions]
pipeline = IngestionPipeline(transformations=[OpenAIEmbedding()])
reaction_nodes = await pipeline.arun(documents=reaction_nodes)
index = VectorStoreIndex(reaction_nodes)
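# Illustrative (an assumption, not from the source): the reaction index now
# doubles as a label retriever, the building block the pack above relies on.
reaction_retriever = index.as_retriever(similarity_top_k=2)
# [n.get_content() for n in reaction_retriever.retrieve("patient developed a rash")]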
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index')
import pandas as pd
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)
pd.set_option("display.max_colwidth", None)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm')
from llama_index.readers.file import FlatReader
from pathlib import Path
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.readers.file import FlatReader
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
from llama_index.core.ingestion import IngestionPipeline
from pathlib import Path
import nest_asyncio
nest_asyncio.apply()
reader = FlatReader()
docs = reader.load_data(Path("./tesla_2020_10k.htm"))
pipeline = IngestionPipeline(
documents=docs,
transformations=[
HTMLNodeParser.from_defaults(),
SentenceSplitter(chunk_size=1024, chunk_overlap=200),
OpenAIEmbedding(),
],
)
eval_nodes = pipeline.run(documents=docs)
eval_llm = OpenAI(model="gpt-3.5-turbo")
dataset_generator = DatasetGenerator(
eval_nodes[:100],
llm=eval_llm,
show_progress=True,
num_questions_per_chunk=3,
)
eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=100)
len(eval_dataset.qr_pairs)
eval_dataset.save_json("data/tesla10k_eval_dataset.json")
eval_dataset = QueryResponseDataset.from_json(
"data/tesla10k_eval_dataset.json"
)
eval_qs = eval_dataset.questions
qr_pairs = eval_dataset.qr_pairs
ref_response_strs = [r for (_, r) in qr_pairs]
from llama_index.core.evaluation import (
CorrectnessEvaluator,
SemanticSimilarityEvaluator,
)
from llama_index.core.evaluation.eval_utils import (
get_responses,
get_results_df,
)
from llama_index.core.evaluation import BatchEvalRunner
evaluator_c = CorrectnessEvaluator(llm=eval_llm)
evaluator_s = SemanticSimilarityEvaluator()  # embedding-based; takes no LLM
evaluator_dict = {
"correctness": evaluator_c,
"semantic_similarity": evaluator_s,
}
batch_eval_runner = BatchEvalRunner(
evaluator_dict, workers=2, show_progress=True
)
from llama_index.core import VectorStoreIndex
async def run_evals(
pipeline, batch_eval_runner, docs, eval_qs, eval_responses_ref
):
nodes = pipeline.run(documents=docs)
vector_index = VectorStoreIndex(nodes)
query_engine = vector_index.as_query_engine()
pred_responses = get_responses(eval_qs, query_engine, show_progress=True)
eval_results = await batch_eval_runner.aevaluate_responses(
eval_qs, responses=pred_responses, reference=eval_responses_ref
)
return eval_results
from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter
sent_parser_o0 = SentenceSplitter(chunk_size=1024, chunk_overlap=0)
sent_parser_o200 = SentenceSplitter(chunk_size=1024, chunk_overlap=200)
sent_parser_o500 = SentenceSplitter(chunk_size=1024, chunk_overlap=500)
html_parser = HTMLNodeParser.from_defaults()
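# Hedged sketch of the comparison this section sets up: one ingestion pipeline
# per splitter, each scored with run_evals. The dict keys and the commented
# loop are assumptions, not the source's exact code.
sent_parser_dict = {
    "overlap_0": sent_parser_o0,
    "overlap_200": sent_parser_o200,
    "overlap_500": sent_parser_o500,
}
# for name, sent_parser in sent_parser_dict.items():
#     pipeline = IngestionPipeline(
#         transformations=[html_parser, sent_parser, OpenAIEmbedding()]
#     )
#     eval_results = await run_evals(
#         pipeline, batch_eval_runner, docs, eval_qs, ref_response_strs
#     )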
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.postprocessor import TimeWeightedPostprocessor
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.core.response.notebook_utils import display_response
from datetime import datetime, timedelta
from llama_index.core import StorageContext
now = datetime.now()
key = "__last_accessed__"
doc1 = SimpleDirectoryReader(
input_files=["./test_versioned_data/paul_graham_essay_v1.txt"]
).load_data()[0]
doc2 = SimpleDirectoryReader(
input_files=["./test_versioned_data/paul_graham_essay_v2.txt"]
).load_data()[0]
doc3 = SimpleDirectoryReader(
input_files=["./test_versioned_data/paul_graham_essay_v3.txt"]
).load_data()[0]
from llama_index.core import Settings
Settings.text_splitter = SentenceSplitter(chunk_size=512)
nodes1 = Settings.text_splitter.get_nodes_from_documents([doc1])
nodes2 = Settings.text_splitter.get_nodes_from_documents([doc2])
nodes3 = Settings.text_splitter.get_nodes_from_documents([doc3])
nodes1[14].metadata[key] = (now - timedelta(hours=3)).timestamp()
nodes1[14].excluded_llm_metadata_keys = [key]
nodes2[14].metadata[key] = (now - timedelta(hours=2)).timestamp()
nodes2[14].excluded_llm_metadata_keys = [key]
nodes3[14].metadata[key] = (now - timedelta(hours=1)).timestamp()
nodes3[14].excluded_llm_metadata_keys = [key]
docstore = SimpleDocumentStore()
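# Hedged continuation: register all three node versions so a time-weighted
# postprocessor can favor the most recently accessed copy. The postprocessor
# arguments below are illustrative assumptions.
docstore.add_documents(nodes1 + nodes2 + nodes3)
# node_postprocessor = TimeWeightedPostprocessor(
#     time_decay=0.5, time_access_refresh=False, top_k=1
# )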
import openai
openai.api_key = "sk-your-key"
from llama_index.agent import OpenAIAgent
from llama_index.tools.azure_speech.base import AzureSpeechToolSpec
from llama_index.tools.azure_translate.base import AzureTranslateToolSpec
speech_tool = AzureSpeechToolSpec(speech_key="your-key", region="eastus")
translate_tool = AzureTranslateToolSpec(api_key="your-key", region="eastus")
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.retrievers.bm25 import BM25Retriever
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
llm = OpenAI(model="gpt-4")
splitter = SentenceSplitter(chunk_size=1024)
nodes = splitter.get_nodes_from_documents(documents)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
index = VectorStoreIndex(
nodes=nodes,
storage_context=storage_context,
)
retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)
from llama_index.core.response.notebook_utils import display_source_node
nodes = retriever.retrieve("What happened at Viaweb and Interleaf?")
for node in nodes:
    display_source_node(node)
get_ipython().system('pip install llama-index-multi-modal-llms-ollama')
get_ipython().system('pip install llama-index-readers-file')
get_ipython().system('pip install unstructured')
get_ipython().system('pip install llama-index-embeddings-huggingface')
get_ipython().system('pip install llama-index-vector-stores-qdrant')
get_ipython().system('pip install llama-index-embeddings-clip')
from llama_index.multi_modal_llms.ollama import OllamaMultiModal
mm_model = OllamaMultiModal(model="llava:13b")
from pathlib import Path
from llama_index.core import SimpleDirectoryReader
from PIL import Image
import matplotlib.pyplot as plt
input_image_path = Path("restaurant_images")
if not input_image_path.exists():
Path.mkdir(input_image_path)
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1GlqcNJhGGbwLKjJK1QJ_nyswCTQ2K2Fq" -O ./restaurant_images/fried_chicken.png')
image_documents = SimpleDirectoryReader("./restaurant_images").load_data()
imageUrl = "./restaurant_images/fried_chicken.png"
image = Image.open(imageUrl).convert("RGB")
plt.figure(figsize=(16, 5))
plt.imshow(image)
from pydantic import BaseModel
class Restaurant(BaseModel):
"""Data model for an restaurant."""
restaurant: str
food: str
discount: str
price: str
rating: str
review: str
from llama_index.core.program import MultiModalLLMCompletionProgram
from llama_index.core.output_parsers import PydanticOutputParser
prompt_template_str = """\
{query_str}
Return the answer as a Pydantic object. The Pydantic schema is given below:
"""
mm_program = MultiModalLLMCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Restaurant),
image_documents=image_documents,
prompt_template_str=prompt_template_str,
multi_modal_llm=mm_model,
verbose=True,
)
response = mm_program(query_str="Can you summarize what is in the image?")
for res in response:
print(res)
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm')
get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1THe1qqM61lretr9N3BmINc_NWDvuthYf" -O shanghai.jpg')
from pathlib import Path
from llama_index.readers.file import UnstructuredReader
from llama_index.core.schema import ImageDocument
loader = UnstructuredReader()
documents = loader.load_data(file=Path("tesla_2021_10k.htm"))
image_doc = ImageDocument(image_path="./shanghai.jpg")
from llama_index.core import VectorStoreIndex
from llama_index.core.embeddings import resolve_embed_model
embed_model = resolve_embed_model("local:BAAI/bge-m3")
vector_index = VectorStoreIndex.from_documents(
documents, embed_model=embed_model
)
query_engine = vector_index.as_query_engine()
from llama_index.core.prompts import PromptTemplate
from llama_index.core.query_pipeline import QueryPipeline, FnComponent
query_prompt_str = """\
Please expand the initial statement using the provided context from the Tesla 10K report.
{initial_statement}
"""
query_prompt_tmpl = PromptTemplate(query_prompt_str)
qp = QueryPipeline(
modules={
"mm_model": mm_model.as_query_component(
partial={"image_documents": [image_doc]}
),
"query_prompt": query_prompt_tmpl,
"query_engine": query_engine,
},
verbose=True,
)
qp.add_chain(["mm_model", "query_prompt", "query_engine"])
rag_response = qp.run("Which Tesla Factory is shown in the image?")
print(f"> Retrieval Augmented Response: {rag_response}")
rag_response.source_nodes[1].get_content()
get_ipython().system('wget "https://drive.usercontent.google.com/download?id=1qQDcaKuzgRGuEC1kxgYL_4mx7vG-v4gC&export=download&authuser=1&confirm=t&uuid=f944e95f-a31f-4b55-b68f-8ea67a6e90e5&at=APZUnTVZ6n1aOg7rtkcjBjw7Pt1D:1707010667927" -O mixed_wiki.zip')
get_ipython().system('unzip mixed_wiki.zip')
get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O ./mixed_wiki/tesla_2021_10k.htm')
from llama_index.core.indices.multi_modal.base import (
MultiModalVectorStoreIndex,
)
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.embeddings.clip import ClipEmbedding
import qdrant_client
client = qdrant_client.QdrantClient(path="qdrant_mm_db")
text_store = QdrantVectorStore(
client=client, collection_name="text_collection"
)
image_store = QdrantVectorStore(
client=client, collection_name="image_collection"
)
storage_context = StorageContext.from_defaults(
vector_store=text_store, image_store=image_store
)
image_embed_model = ClipEmbedding()
get_ipython().run_line_magic('pip', 'install llama-index-readers-myscale')
get_ipython().system('pip install llama-index')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
import clickhouse_connect
host = "YOUR_CLUSTER_HOST"
username = "YOUR_USERNAME"
password = "YOUR_CLUSTER_PASSWORD"
client = clickhouse_connect.get_client(
host=host, port=8443, username=username, password=password
)
import random
from llama_index.readers.myscale import MyScaleReader
reader = MyScaleReader(myscale_host=host, username=username, password=password)
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install duckdb duckdb-engine')
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SQLDatabase, SimpleDirectoryReader, Document
from llama_index.readers.wikipedia import WikipediaReader
from llama_index.core.query_engine import NLSQLTableQueryEngine
from llama_index.core.indices.struct_store import SQLTableRetrieverQueryEngine
from IPython.display import Markdown, display
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
engine = create_engine("duckdb:///:memory:")
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
metadata_obj.tables.keys()
from sqlalchemy import insert
rows = [
{"city_name": "Toronto", "population": 2930000, "country": "Canada"},
{"city_name": "Tokyo", "population": 13960000, "country": "Japan"},
{
"city_name": "Chicago",
"population": 2679000,
"country": "United States",
},
{"city_name": "Seoul", "population": 9776000, "country": "South Korea"},
]
for row in rows:
stmt = insert(city_stats_table).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
with engine.connect() as connection:
cursor = connection.exec_driver_sql("SELECT * FROM city_stats")
print(cursor.fetchall())
from llama_index.core import SQLDatabase
sql_database = SQLDatabase(engine, include_tables=["city_stats"])
query_engine = NLSQLTableQueryEngine(sql_database)
response = query_engine.query("Which city has the highest population?")
str(response)
response.metadata
engine = create_engine("duckdb:///:memory:")
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
all_table_names = ["city_stats"]
n = 100
for i in range(n):
tmp_table_name = f"tmp_table_{i}"
tmp_table = Table(
tmp_table_name,
metadata_obj,
Column(f"tmp_field_{i}_1", String(16), primary_key=True),
Column(f"tmp_field_{i}_2", Integer),
Column(f"tmp_field_{i}_3", String(16), nullable=False),
)
all_table_names.append(f"tmp_table_{i}")
metadata_obj.create_all(engine)
from sqlalchemy import insert
rows = [
{"city_name": "Toronto", "population": 2930000, "country": "Canada"},
{"city_name": "Tokyo", "population": 13960000, "country": "Japan"},
{
"city_name": "Chicago",
"population": 2679000,
"country": "United States",
},
{"city_name": "Seoul", "population": 9776000, "country": "South Korea"},
]
for row in rows:
stmt = insert(city_stats_table).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
sql_database = SQLDatabase(engine, include_tables=["city_stats"])
get_ipython().system('pip install llama-index')
import logging
import sys
from IPython.display import Markdown, display
import pandas as pd
from llama_index.core.query_engine import PandasQueryEngine
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
df = pd.DataFrame(
{
"city": ["Toronto", "Tokyo", "Berlin"],
"population": [2930000, 13960000, 3645000],
}
)
query_engine = PandasQueryEngine(df=df, verbose=True)
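# Illustrative query (an assumption, mirroring the usual PandasQueryEngine demo).
response = query_engine.query("What is the city with the highest population?")
print(str(response))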
get_ipython().system('pip install llama-index yfinance')
import openai
from llama_index.agent import OpenAIAgent
openai.api_key = "sk-..."
from llama_index.tools.yahoo_finance.base import YahooFinanceToolSpec
finance_tool = YahooFinanceToolSpec()
finance_tool_list = finance_tool.to_tool_list()
for tool in finance_tool_list:
print(tool.metadata.name)
print(finance_tool.balance_sheet("AAPL"))
agent = OpenAIAgent.from_tools(finance_tool_list)
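# Illustrative usage (an assumption, not from the source): route a finance
# question through the agent so it can pick among the Yahoo Finance tools.
# print(agent.chat("What are the analyst recommendations for AAPL?"))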
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
get_ipython().system('pip install llama-index')
import pinecone
import os
api_key = os.environ["PINECONE_API_KEY"]
pinecone.init(api_key=api_key, environment="us-west4-gcp-free")
import os
import getpass
import openai
openai.api_key = "sk-<your-key>"
try:
pinecone.create_index(
"quickstart-index", dimension=1536, metric="euclidean", pod_type="p1"
)
except Exception:
pass
pinecone_index = pinecone.Index("quickstart-index")
pinecone_index.delete(deleteAll=True, namespace="test")
from llama_index.core import VectorStoreIndex, StorageContext
from llama_index.vector_stores.pinecone import PineconeVectorStore
from llama_index.core.schema import TextNode
nodes = [
TextNode(
text=(
"Michael Jordan is a retired professional basketball player,"
" widely regarded as one of the greatest basketball players of all"
" time."
),
metadata={
"category": "Sports",
"country": "United States",
"gender": "male",
"born": 1963,
},
),
TextNode(
text=(
"Angelina Jolie is an American actress, filmmaker, and"
" humanitarian. She has received numerous awards for her acting"
" and is known for her philanthropic work."
),
metadata={
"category": "Entertainment",
"country": "United States",
"gender": "female",
"born": 1975,
},
),
TextNode(
text=(
"Elon Musk is a business magnate, industrial designer, and"
" engineer. He is the founder, CEO, and lead designer of SpaceX,"
" Tesla, Inc., Neuralink, and The Boring Company."
),
metadata={
"category": "Business",
"country": "United States",
"gender": "male",
"born": 1971,
},
),
TextNode(
text=(
"Rihanna is a Barbadian singer, actress, and businesswoman. She"
" has achieved significant success in the music industry and is"
" known for her versatile musical style."
),
metadata={
"category": "Music",
"country": "Barbados",
"gender": "female",
"born": 1988,
},
),
TextNode(
text=(
"Cristiano Ronaldo is a Portuguese professional footballer who is"
" considered one of the greatest football players of all time. He"
" has won numerous awards and set multiple records during his"
" career."
),
metadata={
"category": "Sports",
"country": "Portugal",
"gender": "male",
"born": 1985,
},
),
]
vector_store = PineconeVectorStore(
pinecone_index=pinecone_index, namespace="test"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes, storage_context=storage_context)
from llama_index.core.tools import FunctionTool
from llama_index.core.vector_stores import (
VectorStoreInfo,
MetadataInfo,
MetadataFilter,
MetadataFilters,
FilterCondition,
FilterOperator,
)
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from typing import List, Tuple, Any
from pydantic import BaseModel, Field
top_k = 3
vector_store_info = VectorStoreInfo(
content_info="brief biography of celebrities",
metadata_info=[
MetadataInfo(
name="category",
type="str",
description=(
"Category of the celebrity, one of [Sports, Entertainment,"
" Business, Music]"
),
),
MetadataInfo(
name="country",
type="str",
description=(
"Country of the celebrity, one of [United States, Barbados,"
" Portugal]"
),
),
MetadataInfo(
name="gender",
type="str",
description=("Gender of the celebrity, one of [male, female]"),
),
MetadataInfo(
name="born",
type="int",
description=("Born year of the celebrity, could be any integer"),
),
],
)
class AutoRetrieveModel(BaseModel):
query: str = Field(..., description="natural language query string")
filter_key_list: List[str] = Field(
..., description="List of metadata filter field names"
)
filter_value_list: List[Any] = Field(
...,
description=(
"List of metadata filter field values (corresponding to names"
" specified in filter_key_list)"
),
)
filter_operator_list: List[str] = Field(
...,
description=(
"Metadata filters conditions (could be one of <, <=, >, >=, ==, !=)"
),
)
filter_condition: str = Field(
...,
description=("Metadata filters condition values (could be AND or OR)"),
)
description = f"""\
Use this tool to look up biographical information about celebrities.
The vector database schema is given below:
{vector_store_info.json()}
"""
def auto_retrieve_fn(
query: str,
filter_key_list: List[str],
    filter_value_list: List[Any],
filter_operator_list: List[str],
filter_condition: str,
):
"""Auto retrieval function.
Performs auto-retrieval from a vector database, and then applies a set of filters.
"""
query = query or "Query"
metadata_filters = [
MetadataFilter(key=k, value=v, operator=op)
for k, v, op in zip(
filter_key_list, filter_value_list, filter_operator_list
)
]
retriever = VectorIndexRetriever(
index,
filters=MetadataFilters(
filters=metadata_filters, condition=filter_condition
),
        similarity_top_k=top_k,
)
query_engine = RetrieverQueryEngine.from_args(retriever)
response = query_engine.query(query)
return str(response)
auto_retrieve_tool = FunctionTool.from_defaults(
fn=auto_retrieve_fn,
name="celebrity_bios",
description=description,
fn_schema=AutoRetrieveModel,
)
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI
agent = OpenAIAgent.from_tools(
[auto_retrieve_tool],
llm=OpenAI(temperature=0, model="gpt-4-0613"),
verbose=True,
)
response = agent.chat("Tell me about two celebrities from the United States. ")
print(str(response))
response = agent.chat("Tell me about two celebrities born after 1980. ")
print(str(response))
response = agent.chat(
"Tell me about few celebrities under category business and born after 1950. "
)
print(str(response))
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
String,
Integer,
select,
column,
)
from llama_index.core import SQLDatabase
from llama_index.core.indices import SQLStructStoreIndex
engine = create_engine("sqlite:///:memory:", future=True)
metadata_obj = MetaData()
table_name = "city_stats"
city_stats_table = Table(
table_name,
metadata_obj,
Column("city_name", String(16), primary_key=True),
Column("population", Integer),
Column("country", String(16), nullable=False),
)
metadata_obj.create_all(engine)
metadata_obj.tables.keys()
from sqlalchemy import insert
rows = [
{"city_name": "Toronto", "population": 2930000, "country": "Canada"},
{"city_name": "Tokyo", "population": 13960000, "country": "Japan"},
{"city_name": "Berlin", "population": 3645000, "country": "Germany"},
]
for row in rows:
stmt = insert(city_stats_table).values(**row)
with engine.begin() as connection:
cursor = connection.execute(stmt)
with engine.connect() as connection:
cursor = connection.exec_driver_sql("SELECT * FROM city_stats")
print(cursor.fetchall())
sql_database = SQLDatabase(engine, include_tables=["city_stats"])
from llama_index.core.query_engine import NLSQLTableQueryEngine
query_engine = NLSQLTableQueryEngine(
sql_database=sql_database,
tables=["city_stats"],
)
get_ipython().system('pip install wikipedia')
from llama_index.readers.wikipedia import WikipediaReader
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
cities = ["Toronto", "Berlin", "Tokyo"]
wiki_docs = WikipediaReader().load_data(pages=cities)
get_ipython().run_line_magic('pip', 'install llama-index-llms-replicate')
get_ipython().system('pip install llama-index')
from llama_index.llms.replicate import Replicate
from llama_index.core.llms.llama_utils import messages_to_prompt
llm_13b = Replicate(
model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
context_window=4096,
messages_to_prompt=messages_to_prompt, # override message representation for llama 2
)
llm_70b = Replicate(
model="replicate/llama70b-v2-chat:e951f18578850b652510200860fc4ea62b3b16fac280f83ff32282f87bbd2e48",
context_window=4096,
messages_to_prompt=messages_to_prompt, # override message representation for llama 2
)
from llama_index.core.chat_engine import SimpleChatEngine
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.llms import ChatMessage
bot_70b = SimpleChatEngine(
llm=llm_70b,
memory=ChatMemoryBuffer.from_defaults(llm=llm_70b),
prefix_messages=[
ChatMessage(
role="system", content="You are a rapper with an ENTJ personality"
)
],
)
bot_13b = SimpleChatEngine(
    llm=llm_13b,
    memory=ChatMemoryBuffer.from_defaults(llm=llm_13b),
)
get_ipython().run_line_magic('pip', 'install llama-index-llms-konko')
get_ipython().system('pip install llama-index')
import os
os.environ["KONKO_API_KEY"] = "<your-api-key>"
from llama_index.llms.konko import Konko
from llama_index.core.llms import ChatMessage
llm = Konko(model="meta-llama/llama-2-13b-chat")
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().system('pip install llama-index')
import pinecone
import os
api_key = os.environ["PINECONE_API_KEY"]
pinecone.init(api_key=api_key, environment="us-west1-gcp")
pinecone.create_index(
"quickstart", dimension=1536, metric="euclidean", pod_type="p1"
)
pinecone_index = pinecone.Index("quickstart")
pinecone_index.delete(deleteAll=True)
from llama_index.vector_stores.pinecone import PineconeVectorStore
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
import os
from getpass import getpass
if os.getenv("OPENAI_API_KEY") is None:
os.environ["OPENAI_API_KEY"] = getpass(
"Paste your OpenAI key from:"
" https://platform.openai.com/account/api-keys\n"
)
assert os.getenv("OPENAI_API_KEY", "").startswith(
"sk-"
), "This doesn't look like a valid OpenAI API key"
print("OpenAI API key configured")
get_ipython().run_line_magic('pip', 'install -q html2text llama-index pandas pyarrow tqdm')
get_ipython().run_line_magic('pip', 'install -q llama-index-readers-web')
get_ipython().run_line_magic('pip', 'install -q llama-index-callbacks-openinference')
import hashlib
import json
from pathlib import Path
import os
import textwrap
from typing import List, Union
import llama_index.core
from llama_index.readers.web import SimpleWebPageReader
from llama_index.core import VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.callbacks import CallbackManager
from llama_index.callbacks.openinference import OpenInferenceCallbackHandler
from llama_index.callbacks.openinference.base import (
as_dataframe,
QueryData,
NodeData,
)
from llama_index.core.node_parser import SimpleNodeParser
import pandas as pd
from tqdm import tqdm
documents = SimpleWebPageReader()
get_ipython().run_line_magic('pip', 'install llama-index-question-gen-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from IPython.display import Markdown, display
def display_prompt_dict(prompts_dict):
for k, p in prompts_dict.items():
text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>"
display(Markdown(text_md))
print(p.get_template())
display(Markdown("<br><br>"))
from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector
from llama_index.core.selectors import (
PydanticMultiSelector,
PydanticSingleSelector,
)
selector = LLMMultiSelector.from_defaults()
from llama_index.core.tools import ToolMetadata
tool_choices = [
ToolMetadata(
name="covid_nyt",
description=("This tool contains a NYT news article about COVID-19"),
),
ToolMetadata(
name="covid_wiki",
description=("This tool contains the Wikipedia page about COVID-19"),
),
ToolMetadata(
name="covid_tesla",
description=("This tool contains the Wikipedia page about apples"),
),
]
display_prompt_dict(selector.get_prompts())
selector_result = selector.select(
tool_choices, query="Tell me more about COVID-19"
)
selector_result.selections
from llama_index.core import PromptTemplate
from llama_index.llms.openai import OpenAI
query_gen_str = """\
You are a helpful assistant that generates multiple search queries based on a \
single input query. Generate {num_queries} search queries, one on each line, \
related to the following input query:
Query: {query}
Queries:
"""
query_gen_prompt = PromptTemplate(query_gen_str)
llm = OpenAI(model="gpt-3.5-turbo")
def generate_queries(query: str, llm, num_queries: int = 4):
response = llm.predict(
query_gen_prompt, num_queries=num_queries, query=query
)
queries = response.split("\n")
queries_str = "\n".join(queries)
print(f"Generated queries:\n{queries_str}")
return queries
queries = generate_queries("What happened at Interleaf and Viaweb?", llm)
queries
from llama_index.core.indices.query.query_transform import HyDEQueryTransform
from llama_index.llms.openai import OpenAI
hyde = HyDEQueryTransform(include_original=True)
llm = OpenAI(model="gpt-3.5-turbo")
query_bundle = hyde.run("What is Bel?")
query_bundle.custom_embedding_strs
from llama_index.core.question_gen import LLMQuestionGenerator
from llama_index.question_gen.openai import OpenAIQuestionGenerator
from llama_index.llms.openai import OpenAI
llm = OpenAI()
question_gen = OpenAIQuestionGenerator.from_defaults(llm=llm)
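# Hedged usage sketch: OpenAIQuestionGenerator.generate takes a list of
# ToolMetadata and a QueryBundle; the tool and query below are assumptions.
# from llama_index.core import QueryBundle
# from llama_index.core.tools import ToolMetadata
# sub_questions = question_gen.generate(
#     tools=[ToolMetadata(name="covid_nyt", description="NYT article on COVID-19")],
#     query=QueryBundle("What did the NYT report about COVID-19 vaccines?"),
# )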
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai')
get_ipython().system('pip install openai matplotlib')
import os
OPENAI_API_TOKEN = "sk-" # Your OpenAI API token here
os.environ["OPENAI_API_TOKEN"] = OPENAI_API_TOKEN
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.core.multi_modal_llms.generic_utils import load_image_urls
image_urls = [
"https://res.cloudinary.com/hello-tickets/image/upload/c_limit,f_auto,q_auto,w_1920/v1640835927/o3pfl41q7m5bj8jardk0.jpg",
]
image_documents = load_image_urls(image_urls)
openai_mm_llm = OpenAIMultiModal(
model="gpt-4-vision-preview", api_key=OPENAI_API_TOKEN, max_new_tokens=300
)
from PIL import Image
import requests
from io import BytesIO
import matplotlib.pyplot as plt
img_response = requests.get(image_urls[0])
print(image_urls[0])
img = Image.open(BytesIO(img_response.content))
plt.imshow(img)
complete_response = openai_mm_llm.complete(
prompt="Describe the images as an alternative text",
image_documents=image_documents,
)
print(complete_response)
stream_complete_response = openai_mm_llm.stream_complete(
prompt="give me more context for this image",
image_documents=image_documents,
)
for r in stream_complete_response:
print(r.delta, end="")
from llama_index.core.multi_modal_llms.openai_utils import (
generate_openai_multi_modal_chat_message,
)
chat_msg_1 = generate_openai_multi_modal_chat_message(
prompt="Describe the images as an alternative text",
role="user",
image_documents=image_documents,
)
chat_msg_2 = generate_openai_multi_modal_chat_message(
prompt="The image is a graph showing the surge in US mortgage rates. It is a visual representation of data, with a title at the top and labels for the x and y-axes. Unfortunately, without seeing the image, I cannot provide specific details about the data or the exact design of the graph.",
role="assistant",
)
chat_msg_3 = generate_openai_multi_modal_chat_message(
prompt="can I know more?",
role="user",
)
chat_messages = [chat_msg_1, chat_msg_2, chat_msg_3]
chat_response = openai_mm_llm.chat(
messages=chat_messages,
)
for msg in chat_messages:
print(msg.role, msg.content)
print(chat_response)
stream_chat_response = openai_mm_llm.stream_chat(
messages=chat_messages,
)
for r in stream_chat_response:
print(r.delta, end="")
response_acomplete = await openai_mm_llm.acomplete(
prompt="Describe the images as an alternative text",
image_documents=image_documents,
)
print(response_acomplete)
response_astream_complete = await openai_mm_llm.astream_complete(
prompt="Describe the images as an alternative text",
image_documents=image_documents,
)
async for delta in response_astream_complete:
print(delta.delta, end="")
achat_response = await openai_mm_llm.achat(
messages=chat_messages,
)
print(achat_response)
astream_chat_response = await openai_mm_llm.astream_chat(
messages=chat_messages,
)
async for delta in astream_chat_response:
print(delta.delta, end="")
image_urls = [
"https://www.visualcapitalist.com/wp-content/uploads/2023/10/US_Mortgage_Rate_Surge-Sept-11-1.jpg",
"https://www.sportsnet.ca/wp-content/uploads/2023/11/CP1688996471-1040x572.jpg",
]
image_documents_1 = load_image_urls(image_urls)
import warnings
warnings.filterwarnings("ignore")
import os
from llama_index.tools.cogniswitch import CogniswitchToolSpec
from llama_index.agent import ReActAgent
cs_token = "<your-cogniswitch-platform-token>"  # placeholder, not from the source
oauth_token = "<your-oauth-token>"  # placeholder, not from the source
toolspec = CogniswitchToolSpec(cs_token=cs_token, apiKey=oauth_token)
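# Hedged sketch: expose the Cogniswitch tools to a ReAct agent. to_tool_list()
# and ReActAgent.from_tools are real APIs; this wiring is our assumption, and
# an OpenAI key is assumed to be configured for the agent's default LLM.
agent = ReActAgent.from_tools(toolspec.to_tool_list(), verbose=True)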
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-redis')
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface')
get_ipython().run_line_magic('pip', 'install llama-index-readers-google')
get_ipython().system('docker run -d --name redis-stack -p 6379:6379 -p 8001:8001 redis/redis-stack:latest')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core.ingestion import (
DocstoreStrategy,
IngestionPipeline,
IngestionCache,
)
from llama_index.core.ingestion.cache import RedisCache
from llama_index.storage.docstore.redis import RedisDocumentStore
from llama_index.core.node_parser import SentenceSplitter
from llama_index.vector_stores.redis import RedisVectorStore
vector_store = RedisVectorStore(
index_name="redis_vector_store",
index_prefix="vectore_store",
redis_url="redis://localhost:6379",
)
cache = IngestionCache(
cache=RedisCache.from_host_and_port("localhost", 6379),
collection="redis_cache",
)
if vector_store._index_exists():
vector_store.delete_index()
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
pipeline = IngestionPipeline(
transformations=[
SentenceSplitter(),
embed_model,
],
docstore=RedisDocumentStore.from_host_and_port(
"localhost", 6379, namespace="document_store"
),
vector_store=vector_store,
cache=cache,
docstore_strategy=DocstoreStrategy.UPSERTS,
)
from llama_index.core import VectorStoreIndex
index = VectorStoreIndex.from_vector_store(
pipeline.vector_store, embed_model=embed_model
)
from llama_index.readers.google import GoogleDriveReader
loader = GoogleDriveReader()
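# Hedged usage sketch: GoogleDriveReader.load_data accepts a folder_id; the
# id below is a placeholder, not from the source.
# docs = loader.load_data(folder_id="<your-drive-folder-id>")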
get_ipython().system('pip install llama-index')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
from llama_index.core import Settings
nodes = Settings.node_parser.get_nodes_from_documents(documents)
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant')
get_ipython().system('pip install llama-index qdrant-client pypdf "transformers[torch]"')
import os
os.environ["OPENAI_API_KEY"] = "sk-..."
get_ipython().system("mkdir -p 'data/'")
get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"')
from llama_index.core import SimpleDirectoryReader
documents = SimpleDirectoryReader("./data/").load_data()
get_ipython().run_line_magic('pip', 'install llama-index-callbacks-aim')
get_ipython().system('pip install llama-index')
from llama_index.core.callbacks import CallbackManager
from llama_index.callbacks.aim import AimCallback
from llama_index.core import SummaryIndex
from llama_index.core import SimpleDirectoryReader
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
docs = SimpleDirectoryReader("./data/paul_graham").load_data()
aim_callback = AimCallback(repo="./")
callback_manager = CallbackManager([aim_callback])
index = SummaryIndex.from_documents(docs, callback_manager=callback_manager)
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
from llama_index.llms.openai import OpenAI
resp = OpenAI().complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.openai import OpenAI
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="What is your name"),
]
resp = OpenAI().chat(messages)
print(resp)
from llama_index.llms.openai import OpenAI
llm = OpenAI()
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
print(r.delta, end="")
from llama_index.llms.openai import OpenAI
from llama_index.core.llms import ChatMessage
llm = OpenAI()
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="What is your name"),
]
resp = llm.stream_chat(messages)
for r in resp:
print(r.delta, end="")
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="text-davinci-003")
resp = llm.complete("Paul Graham is ")
print(resp)
messages = [
ChatMessage(
role="system", content="You are a pirate with a colorful personality"
),
ChatMessage(role="user", content="What is your name"),
]
resp = llm.chat(messages)
print(resp)
from pydantic import BaseModel
from llama_index.core.llms.openai_utils import to_openai_tool
class Song(BaseModel):
"""A song with name and artist"""
name: str
artist: str
song_fn = to_openai_tool(Song)
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-dynamodb')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
TABLE_NAME = os.environ["DYNAMODB_TABLE_NAME"]
from llama_index.storage.docstore.dynamodb import DynamoDBDocumentStore
from llama_index.storage.index_store.dynamodb import DynamoDBIndexStore
from llama_index.vector_stores.dynamodb import DynamoDBVectorStore
storage_context = StorageContext.from_defaults(
docstore=DynamoDBDocumentStore.from_table_name(table_name=TABLE_NAME),
index_store=DynamoDBIndexStore.from_table_name(table_name=TABLE_NAME),
vector_store=DynamoDBVectorStore.from_table_name(table_name=TABLE_NAME),
)
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(
nodes, storage_context=storage_context
)
len(storage_context.docstore.docs)
storage_context.persist()
list_id = summary_index.index_id
vector_id = vector_index.index_id
keyword_id = keyword_table_index.index_id
from llama_index.core import load_index_from_storage
storage_context = StorageContext.from_defaults(
docstore=DynamoDBDocumentStore.from_table_name(table_name=TABLE_NAME),
index_store=DynamoDBIndexStore.from_table_name(table_name=TABLE_NAME),
    vector_store=DynamoDBVectorStore.from_table_name(table_name=TABLE_NAME),
)
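# Hedged continuation: reload each index from DynamoDB by its persisted id
# (load_index_from_storage with index_id is a real API; reuse of the ids
# captured above is our assumption).
summary_index = load_index_from_storage(storage_context, index_id=list_id)
vector_index = load_index_from_storage(storage_context, index_id=vector_id)
keyword_table_index = load_index_from_storage(
    storage_context, index_id=keyword_id
)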
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-longllmlingua')
get_ipython().system('pip install llmlingua llama-index')
import openai
openai.api_key = "<insert_openai_key>"
get_ipython().system('wget "https://www.dropbox.com/s/f6bmb19xdg0xedm/paul_graham_essay.txt?dl=1" -O paul_graham_essay.txt')
from llama_index.core import (
VectorStoreIndex,
SimpleDirectoryReader,
load_index_from_storage,
StorageContext,
)
documents = SimpleDirectoryReader(
input_files=["paul_graham_essay.txt"]
).load_data()
index = VectorStoreIndex.from_documents(documents)
retriever = index.as_retriever(similarity_top_k=2)
query_str = "Where did the author go for art school?"
results = retriever.retrieve(query_str)
print(results)
results
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.response_synthesizers import CompactAndRefine
from llama_index.postprocessor.longllmlingua import LongLLMLinguaPostprocessor
node_postprocessor = LongLLMLinguaPostprocessor(
instruction_str="Given the context, please answer the final question",
target_token=300,
rank_method="longllmlingua",
additional_compress_kwargs={
"condition_compare": True,
"condition_in_question": "after",
"context_budget": "+100",
"reorder_context": "sort", # enable document reorder
},
)
retrieved_nodes = retriever.retrieve(query_str)
synthesizer = CompactAndRefine()
from llama_index.core import QueryBundle
new_retrieved_nodes = node_postprocessor.postprocess_nodes(
    retrieved_nodes, query_bundle=QueryBundle(query_str=query_str)
)
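# Hedged continuation: synthesize an answer over the compressed nodes
# (CompactAndRefine.synthesize(query, nodes) is a real API).
response = synthesizer.synthesize(query_str, new_retrieved_nodes)
print(str(response))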
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google')
get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google')
get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google')
get_ipython().run_line_magic('pip', 'install llama-index')
get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"')
get_ipython().run_line_magic('pip', 'install google-auth-oauthlib')
from google.oauth2 import service_account
from llama_index.vector_stores.google import set_google_config
credentials = service_account.Credentials.from_service_account_file(
"service_account_key.json",
scopes=[
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix
from typing import Iterable
from random import randrange
LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX = "llama-index-colab"
SESSION_CORPUS_ID_PREFIX = (
f"{LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX}-{randrange(1000000)}"
)
def corpus_id(num_id: int) -> str:
return f"{SESSION_CORPUS_ID_PREFIX}-{num_id}"
SESSION_CORPUS_ID = corpus_id(1)
def list_corpora() -> Iterable[genaix.Corpus]:
    client = genaix.build_semantic_retriever()
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai')
get_ipython().system('pip install llama-index')
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
load_index_from_storage,
)
from llama_index.core.tools import QueryEngineTool, ToolMetadata
try:
storage_context = StorageContext.from_defaults(
persist_dir="./storage/lyft"
)
lyft_index = load_index_from_storage(storage_context)
storage_context = StorageContext.from_defaults(
persist_dir="./storage/uber"
)
    uber_index = load_index_from_storage(storage_context)
    index_loaded = True
except Exception:
    index_loaded = False
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai')
get_ipython().run_line_magic('pip', 'install llama-index-readers-file')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
import camelot
from llama_index.core import VectorStoreIndex
from llama_index.core.query_engine import PandasQueryEngine
from llama_index.core.schema import IndexNode
from llama_index.llms.openai import OpenAI
from llama_index.readers.file import PyMuPDFReader
from typing import List
import os
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings
Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
file_path = "billionaires_page.pdf"
reader = PyMuPDFReader()
docs = reader.load(file_path)
def get_tables(path: str, pages: List[int]):
table_dfs = []
for page in pages:
table_list = camelot.read_pdf(path, pages=str(page))
table_df = table_list[0].df
table_df = (
table_df.rename(columns=table_df.iloc[0])
.drop(table_df.index[0])
.reset_index(drop=True)
)
table_dfs.append(table_df)
return table_dfs
table_dfs = get_tables(file_path, pages=[3, 25])
table_dfs[0]
table_dfs[1]
llm = OpenAI(model="gpt-4")
df_query_engines = [
PandasQueryEngine(table_df, llm=llm) for table_df in table_dfs
]
response = df_query_engines[0].query(
"What's the net worth of the second richest billionaire in 2023?"
)
print(str(response))
response = df_query_engines[1].query(
"How many billionaires were there in 2009?"
)
print(str(response))
from llama_index.core import Settings
doc_nodes = Settings.node_parser.get_nodes_from_documents(docs)
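# Hedged sketch (summaries and ids are illustrative): IndexNodes describing
# each table point at the matching PandasQueryEngine, so a retriever can route
# from text chunks down into the tables.
summaries = [
    "This node provides information about the world's richest billionaires in 2023",
    "This node provides information on the number of billionaires and their combined net worth from 2000 to 2023.",
]
df_nodes = [
    IndexNode(text=summary, index_id=f"pandas{idx}")
    for idx, summary in enumerate(summaries)
]
df_id_query_engine_mapping = {
    f"pandas{idx}": df_query_engine
    for idx, df_query_engine in enumerate(df_query_engines)
}
vector_index = VectorStoreIndex(doc_nodes + df_nodes)
vector_retriever = vector_index.as_retriever(similarity_top_k=1)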
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere')
get_ipython().system('pip install llama-index')
from llama_index.llms.cohere import Cohere
api_key = "Your api key"
resp = Cohere(api_key=api_key).complete("Paul Graham is ")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.cohere import Cohere
messages = [
ChatMessage(role="user", content="hello there"),
ChatMessage(
role="assistant", content="Arrrr, matey! How can I help ye today?"
),
ChatMessage(role="user", content="What is your name"),
]
resp = Cohere(api_key=api_key).chat(
messages, preamble_override="You are a pirate with a colorful personality"
)
print(resp)
llm = Cohere(api_key=api_key)
resp = llm.stream_complete("Paul Graham is ")
for r in resp:
print(r.delta, end="")
llm = Cohere(api_key=api_key)
messages = [
ChatMessage(role="user", content="hello there"),
ChatMessage(
role="assistant", content="Arrrr, matey! How can I help ye today?"
),
ChatMessage(role="user", content="What is your name"),
]
resp = llm.stream_chat(
messages, preamble_override="You are a pirate with a colorful personality"
)
for r in resp:
print(r.delta, end="")
from llama_index.llms.cohere import Cohere
llm = Cohere(model="command", api_key=api_key)
resp = llm.complete("Paul Graham is ")
print(resp)
from llama_index.llms.cohere import Cohere
llm = Cohere(model="command", api_key=api_key)
resp = await llm.acomplete("Paul Graham is ")
print(resp)
resp = await llm.astream_complete("Paul Graham is ")
async for delta in resp:
print(delta.delta, end="")
from llama_index.llms.cohere import Cohere
llm_good = Cohere(api_key=api_key)
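# Quick sanity check with the configured client (illustrative prompt).
resp = llm_good.complete("Paul Graham is ")
print(resp)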
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-mongodb')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import logging
import sys
import os
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import SimpleDirectoryReader, StorageContext
from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex
from llama_index.core import SummaryIndex
from llama_index.core import ComposableGraph
from llama_index.llms.openai import OpenAI
from llama_index.core.response.notebook_utils import display_response
from llama_index.core import Settings
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
reader = SimpleDirectoryReader("./data/paul_graham/")
documents = reader.load_data()
from llama_index.core.node_parser import SentenceSplitter
nodes = SentenceSplitter().get_nodes_from_documents(documents)
MONGO_URI = os.environ["MONGO_URI"]
from llama_index.storage.docstore.mongodb import MongoDocumentStore
from llama_index.storage.index_store.mongodb import MongoIndexStore
storage_context = StorageContext.from_defaults(
    docstore=MongoDocumentStore.from_uri(uri=MONGO_URI),
    # MongoIndexStore is imported above, so the index store is Mongo-backed too.
    index_store=MongoIndexStore.from_uri(uri=MONGO_URI),
)
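# Hedged sketch: add nodes once to the shared Mongo-backed docstore, then
# build several indices over the same nodes; Mongo persists them server-side,
# so no explicit persist call is needed.
storage_context.docstore.add_documents(nodes)
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(
    nodes, storage_context=storage_context
)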
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
from llama_index.core.query_pipeline import (
QueryPipeline as QP,
Link,
InputComponent,
)
from llama_index.core.query_engine.pandas import PandasInstructionParser
from llama_index.llms.openai import OpenAI
from llama_index.core import PromptTemplate
get_ipython().system("wget 'https://raw.githubusercontent.com/jerryjliu/llama_index/main/docs/examples/data/csv/titanic_train.csv' -O 'titanic_train.csv'")
import pandas as pd
df = pd.read_csv("./titanic_train.csv")
instruction_str = (
"1. Convert the query to executable Python code using Pandas.\n"
"2. The final line of code should be a Python expression that can be called with the `eval()` function.\n"
"3. The code should represent a solution to the query.\n"
"4. PRINT ONLY THE EXPRESSION.\n"
"5. Do not quote the expression.\n"
)
pandas_prompt_str = (
"You are working with a pandas dataframe in Python.\n"
"The name of the dataframe is `df`.\n"
"This is the result of `print(df.head())`:\n"
"{df_str}\n\n"
"Follow these instructions:\n"
"{instruction_str}\n"
"Query: {query_str}\n\n"
"Expression:"
)
response_synthesis_prompt_str = (
"Given an input question, synthesize a response from the query results.\n"
"Query: {query_str}\n\n"
"Pandas Instructions (optional):\n{pandas_instructions}\n\n"
"Pandas Output: {pandas_output}\n\n"
"Response: "
)
pandas_prompt = PromptTemplate(pandas_prompt_str).partial_format(
instruction_str=instruction_str, df_str=df.head(5)
)
pandas_output_parser = PandasInstructionParser(df)
response_synthesis_prompt = PromptTemplate(response_synthesis_prompt_str)
llm = OpenAI(model="gpt-3.5-turbo")
qp = QP(
modules={
"input": InputComponent(),
"pandas_prompt": pandas_prompt,
"llm1": llm,
"pandas_output_parser": pandas_output_parser,
"response_synthesis_prompt": response_synthesis_prompt,
"llm2": llm,
},
verbose=True,
)
qp.add_chain(["input", "pandas_prompt", "llm1", "pandas_output_parser"])
qp.add_links(
[
| Link("input", "response_synthesis_prompt", dest_key="query_str") | llama_index.core.query_pipeline.Link |
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini')
get_ipython().system('pip install -q llama-index google-generativeai')
get_ipython().run_line_magic('env', 'GOOGLE_API_KEY=...')
import os
GOOGLE_API_KEY = "" # add your GOOGLE API key here
os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY
from llama_index.llms.gemini import Gemini
resp = Gemini().complete("Write a poem about a magic backpack")
print(resp)
from llama_index.core.llms import ChatMessage
from llama_index.llms.gemini import Gemini
messages = [
ChatMessage(role="user", content="Hello friend!"),
ChatMessage(role="assistant", content="Yarr what is shakin' matey?"),
ChatMessage(
role="user", content="Help me decide what to have for dinner."
),
]
resp = Gemini().chat(messages)
print(resp)
from llama_index.llms.gemini import Gemini
llm = Gemini()
resp = llm.stream_complete(
"The story of Sourcrust, the bread creature, is really interesting. It all started when..."
)
for r in resp:
print(r.text, end="")
from llama_index.llms.gemini import Gemini
from llama_index.core.llms import ChatMessage
llm = Gemini()
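# Hedged sketch: stream a chat completion with the client created above.
messages = [
    ChatMessage(role="user", content="Hello friend!"),
    ChatMessage(role="assistant", content="Yarr what is shakin' matey?"),
    ChatMessage(
        role="user", content="Help me decide what to have for dinner."
    ),
]
resp = llm.stream_chat(messages)
for r in resp:
    print(r.delta, end="")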
get_ipython().run_line_magic('pip', 'install llama-index-llms-rungpt')
get_ipython().system('pip install llama-index')
get_ipython().system('pip install rungpt')
get_ipython().system('rungpt serve decapoda-research/llama-7b-hf --precision fp16 --device_map balanced')
from llama_index.llms.rungpt import RunGptLLM
llm = RunGptLLM()
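# Illustrative completion against the locally served model.
response = llm.complete(
    "What public transportation might be available in a city?"
)
print(response.text)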
get_ipython().system('pip install llama-index')
from llama_index.core.evaluation import SemanticSimilarityEvaluator
evaluator = SemanticSimilarityEvaluator()
response = "The sky is typically blue"
reference = """The color of the sky can vary depending on several factors, including time of day, weather conditions, and location.
During the day, when the sun is in the sky, the sky often appears blue.
This is because of a phenomenon called Rayleigh scattering, where molecules and particles in the Earth's atmosphere scatter sunlight in all directions, and blue light is scattered more than other colors because it travels as shorter, smaller waves.
This is why we perceive the sky as blue on a clear day.
"""
result = await evaluator.aevaluate(
response=response,
reference=reference,
)
print("Score: ", result.score)
print("Passing: ", result.passing) # default similarity threshold is 0.8
response = "Sorry, I do not have sufficient context to answer this question."
reference = """The color of the sky can vary depending on several factors, including time of day, weather conditions, and location.
During the day, when the sun is in the sky, the sky often appears blue.
This is because of a phenomenon called Rayleigh scattering, where molecules and particles in the Earth's atmosphere scatter sunlight in all directions, and blue light is scattered more than other colors because it travels as shorter, smaller waves.
This is why we perceive the sky as blue on a clear day.
"""
result = await evaluator.aevaluate(
response=response,
reference=reference,
)
print("Score: ", result.score)
print("Passing: ", result.passing) # default similarity threshold is 0.8
from llama_index.core.evaluation import SemanticSimilarityEvaluator
from llama_index.core.embeddings import SimilarityMode, resolve_embed_model
embed_model = resolve_embed_model("local")
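# Hedged sketch: wire the local embedding model into the evaluator; the
# similarity threshold here is illustrative.
evaluator = SemanticSimilarityEvaluator(
    embed_model=embed_model,
    similarity_mode=SimilarityMode.DEFAULT,
    similarity_threshold=0.6,
)
result = await evaluator.aevaluate(
    response="The sky is yellow.",
    reference="The sky is blue.",
)
print("Score: ", result.score)
print("Passing: ", result.passing)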
from llama_index import VectorStoreIndex, SimpleDirectoryReader
documents = SimpleDirectoryReader(
"../../examples/data/paul_graham"
).load_data()
index = VectorStoreIndex.from_documents(documents)
import pinecone
from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext
from llama_index.vector_stores import PineconeVectorStore
pinecone.init(api_key="<api_key>", environment="<environment>")
pinecone.create_index(
"quickstart", dimension=1536, metric="euclidean", pod_type="p1"
)
storage_context = StorageContext.from_defaults(
vector_store=PineconeVectorStore(pinecone.Index("quickstart"))
)
documents = SimpleDirectoryReader(
"../../examples/data/paul_graham"
).load_data()
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context
)
vector_store = PineconeVectorStore(pinecone.Index("quickstart"))
index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters
query_engine = index.as_query_engine(
similarity_top_k=3,
vector_store_query_mode="default",
filters=MetadataFilters(
filters=[
ExactMatchFilter(key="name", value="paul graham"),
]
),
alpha=None,
doc_ids=None,
)
response = query_engine.query("what did the author do growing up?")
from llama_index import get_response_synthesizer
from llama_index.indices.vector_store.retrievers import VectorIndexRetriever
from llama_index.query_engine.retriever_query_engine import (
RetrieverQueryEngine,
)
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=3,
vector_store_query_mode="default",
    filters=[ExactMatchFilter(key="name", value="paul graham")],
)
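# Assemble the query engine from the configured retriever (legacy v0.9-style
# API, matching the imports above).
response_synthesizer = get_response_synthesizer()
query_engine = RetrieverQueryEngine(
    retriever=retriever,
    response_synthesizer=response_synthesizer,
)
response = query_engine.query("what did the author do growing up?")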
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.postprocessor import TimeWeightedPostprocessor
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.storage.docstore import SimpleDocumentStore
from llama_index.core.response.notebook_utils import display_response
from datetime import datetime, timedelta
from llama_index.core import StorageContext
now = datetime.now()
key = "__last_accessed__"
doc1 = SimpleDirectoryReader(
input_files=["./test_versioned_data/paul_graham_essay_v1.txt"]
).load_data()[0]
doc2 = SimpleDirectoryReader(
input_files=["./test_versioned_data/paul_graham_essay_v2.txt"]
).load_data()[0]
doc3 = SimpleDirectoryReader(
input_files=["./test_versioned_data/paul_graham_essay_v3.txt"]
).load_data()[0]
from llama_index.core import Settings
Settings.text_splitter = SentenceSplitter(chunk_size=512)
nodes1 = Settings.text_splitter.get_nodes_from_documents([doc1])
nodes2 = Settings.text_splitter.get_nodes_from_documents([doc2])
nodes3 = Settings.text_splitter.get_nodes_from_documents([doc3])
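# Hedged sketch: stamp each version's nodes with a mock last-accessed time so
# the time-weighted postprocessor favors the newest version of the essay.
for version_nodes, hours_ago in ((nodes1, 3), (nodes2, 2), (nodes3, 1)):
    for node in version_nodes:
        node.metadata[key] = (now - timedelta(hours=hours_ago)).timestamp()
docstore = SimpleDocumentStore()
docstore.add_documents(nodes1 + nodes2 + nodes3)
storage_context = StorageContext.from_defaults(docstore=docstore)
index = VectorStoreIndex(
    nodes1 + nodes2 + nodes3, storage_context=storage_context
)
node_postprocessor = TimeWeightedPostprocessor(
    time_decay=0.5, time_access_refresh=False, top_k=1
)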
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25')
get_ipython().system('pip install llama-index')
import nest_asyncio
nest_asyncio.apply()
import os
import openai
os.environ["OPENAI_API_KEY"] = "sk-..."
openai.api_key = os.environ["OPENAI_API_KEY"]
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().handlers = []
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
from llama_index.core import (
SimpleDirectoryReader,
StorageContext,
VectorStoreIndex,
)
from llama_index.retrievers.bm25 import BM25Retriever
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")
documents = SimpleDirectoryReader("./data/paul_graham").load_data()
llm = OpenAI(model="gpt-4")
splitter = SentenceSplitter(chunk_size=1024)
nodes = splitter.get_nodes_from_documents(documents)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
index = VectorStoreIndex(
nodes=nodes,
storage_context=storage_context,
)
retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)
from llama_index.core.response.notebook_utils import display_source_node
nodes = retriever.retrieve("What happened at Viaweb and Interleaf?")
for node in nodes:
display_source_node(node)
nodes = retriever.retrieve("What did Paul Graham do after RISD?")
for node in nodes:
display_source_node(node)
from llama_index.core.tools import RetrieverTool
vector_retriever = VectorIndexRetriever(index)
bm25_retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)
retriever_tools = [
RetrieverTool.from_defaults(
retriever=vector_retriever,
description="Useful in most cases",
),
RetrieverTool.from_defaults(
retriever=bm25_retriever,
description="Useful if searching about specific information",
),
]
from llama_index.core.retrievers import RouterRetriever
retriever = RouterRetriever.from_defaults(
retriever_tools=retriever_tools,
llm=llm,
select_multi=True,
)
retrieved_nodes = retriever.retrieve(
    "Can you give me all the context regarding the author's life?"
)
for node in retrieved_nodes:
    display_source_node(node)
get_ipython().system('curl https://www.ipcc.ch/report/ar6/wg2/downloads/report/IPCC_AR6_WGII_Chapter03.pdf --output IPCC_AR6_WGII_Chapter03.pdf')
from llama_index.core import (
VectorStoreIndex,
StorageContext,
SimpleDirectoryReader,
Document,
)
from llama_index.core.node_parser import SentenceSplitter
from llama_index.llms.openai import OpenAI
documents = SimpleDirectoryReader(
input_files=["IPCC_AR6_WGII_Chapter03.pdf"]
).load_data()
llm = OpenAI(model="gpt-3.5-turbo")
splitter = SentenceSplitter(chunk_size=256)
nodes = splitter.get_nodes_from_documents(
[Document(text=documents[0].get_content()[:1000000])]
)
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
index = VectorStoreIndex(nodes, storage_context=storage_context)
from llama_index.retrievers.bm25 import BM25Retriever
vector_retriever = index.as_retriever(similarity_top_k=10)
bm25_retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=10)
from llama_index.core.retrievers import BaseRetriever
class HybridRetriever(BaseRetriever):
def __init__(self, vector_retriever, bm25_retriever):
self.vector_retriever = vector_retriever
self.bm25_retriever = bm25_retriever
super().__init__()
def _retrieve(self, query, **kwargs):
bm25_nodes = self.bm25_retriever.retrieve(query, **kwargs)
vector_nodes = self.vector_retriever.retrieve(query, **kwargs)
all_nodes = []
node_ids = set()
for n in bm25_nodes + vector_nodes:
if n.node.node_id not in node_ids:
all_nodes.append(n)
node_ids.add(n.node.node_id)
return all_nodes
hybrid_retriever = HybridRetriever(vector_retriever, bm25_retriever)
get_ipython().system('pip install sentence-transformers')
from llama_index.core.postprocessor import SentenceTransformerRerank
reranker = SentenceTransformerRerank(top_n=4, model="BAAI/bge-reranker-base")
from llama_index.core import QueryBundle
retrieved_nodes = hybrid_retriever.retrieve(
"What is the impact of climate change on the ocean?"
)
reranked_nodes = reranker.postprocess_nodes(
    retrieved_nodes,
query_bundle=QueryBundle(
"What is the impact of climate change on the ocean?"
),
)
print("Initial retrieval: ", len(retrieved_nodes), " nodes")
print("Re-ranked retrieval: ", len(reranked_nodes), " nodes")
from llama_index.core.response.notebook_utils import display_source_node
for node in reranked_nodes:
    display_source_node(node)
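# Hedged sketch: plug the hybrid retriever and reranker into a query engine.
from llama_index.core.query_engine import RetrieverQueryEngine

query_engine = RetrieverQueryEngine.from_args(
    retriever=hybrid_retriever,
    node_postprocessors=[reranker],
    llm=llm,
)
response = query_engine.query(
    "What is the impact of climate change on the ocean?"
)
print(response)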