Dataset schema (three string columns):
- prompt: string, 70 to 19.8k characters (a llama-index code context truncated just before an API call)
- completion: string, 8 to 303 characters (the call expression that completes the prompt)
- api: string, 23 to 93 characters (the fully qualified name of the completed API)
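Each row pairs a truncated code context (prompt) with the single API call that completes it (completion) and that call's fully qualified name (api). A minimal sketch of consuming rows of this shape, assuming the data is exported as JSON lines (the file name is hypothetical):

from datasets import load_dataset  # Hugging Face `datasets`

ds = load_dataset("json", data_files="api_completions.jsonl", split="train")  # file name is an assumption
row = ds[0]
print(row["api"])         # e.g. "llama_index.core.extractors.QuestionsAnsweredExtractor"
print(row["completion"])  # the call expression that finishes row["prompt"]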
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') get_ipython().system('pip -q install python-dotenv pinecone-client llama-index pymupdf') dotenv_path = ( "env" # Google Colabs will not let you open a .env, but you can set ) with open(dotenv_path, "w") as f: f.write('PINECONE_API_KEY="<your api key>"\n') f.write('PINECONE_ENVIRONMENT="gcp-starter"\n') f.write('OPENAI_API_KEY="<your api key>"\n') import os from dotenv import load_dotenv load_dotenv(dotenv_path=dotenv_path) import pinecone api_key = os.environ["PINECONE_API_KEY"] environment = os.environ["PINECONE_ENVIRONMENT"] pinecone.init(api_key=api_key, environment=environment) index_name = "llamaindex-rag-fs" pinecone.create_index( index_name, dimension=1536, metric="euclidean", pod_type="p1" ) pinecone_index = pinecone.Index(index_name) pinecone_index.delete(deleteAll=True) from llama_index.vector_stores.pinecone import PineconeVectorStore vector_store = PineconeVectorStore(pinecone_index=pinecone_index) get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') import fitz file_path = "./data/llama2.pdf" doc = fitz.open(file_path) from llama_index.core.node_parser import SentenceSplitter text_parser = SentenceSplitter( chunk_size=1024, ) text_chunks = [] doc_idxs = [] for doc_idx, page in enumerate(doc): page_text = page.get_text("text") cur_text_chunks = text_parser.split_text(page_text) text_chunks.extend(cur_text_chunks) doc_idxs.extend([doc_idx] * len(cur_text_chunks)) from llama_index.core.schema import TextNode nodes = [] for idx, text_chunk in enumerate(text_chunks): node = TextNode( text=text_chunk, ) src_doc_idx = doc_idxs[idx] src_page = doc[src_doc_idx] nodes.append(node) print(nodes[0].metadata) print(nodes[0].get_content(metadata_mode="all")) from llama_index.core.extractors import ( QuestionsAnsweredExtractor, TitleExtractor, ) from llama_index.core.ingestion import IngestionPipeline from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo") extractors = [ TitleExtractor(nodes=5, llm=llm),
QuestionsAnsweredExtractor(questions=3, llm=llm)
llama_index.core.extractors.QuestionsAnsweredExtractor
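For context, a sketch of the cell the prompt above truncates: the recorded completion supplies the second extractor, and feeding the list to an IngestionPipeline is an assumption about the next step.

from llama_index.core.extractors import TitleExtractor, QuestionsAnsweredExtractor
from llama_index.core.ingestion import IngestionPipeline
from llama_index.llms.openai import OpenAI

llm = OpenAI(model="gpt-3.5-turbo")
extractors = [
    TitleExtractor(nodes=5, llm=llm),
    QuestionsAnsweredExtractor(questions=3, llm=llm),  # the recorded completion
]
pipeline = IngestionPipeline(transformations=extractors)  # assumed next step
# nodes = pipeline.run(nodes=nodes)  # enriches node metadata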
from llama_index.core import SQLDatabase from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("sqlite:///chinook.db") sql_database = SQLDatabase(engine) from llama_index.core.query_pipeline import QueryPipeline get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip') get_ipython().system('unzip ./chinook.zip') from llama_index.core.settings import Settings from llama_index.core.callbacks import CallbackManager callback_manager = CallbackManager() Settings.callback_manager = callback_manager import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") from llama_index.core.query_engine import NLSQLTableQueryEngine from llama_index.core.tools import QueryEngineTool sql_query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["albums", "tracks", "artists"], verbose=True, ) sql_tool = QueryEngineTool.from_defaults( query_engine=sql_query_engine, name="sql_tool", description=( "Useful for translating a natural language query into a SQL query" ), ) from llama_index.core.query_pipeline import QueryPipeline as QP qp = QP(verbose=True) from llama_index.core.agent.react.types import ( ActionReasoningStep, ObservationReasoningStep, ResponseReasoningStep, ) from llama_index.core.agent import Task, AgentChatResponse from llama_index.core.query_pipeline import ( AgentInputComponent, AgentFnComponent, CustomAgentComponent, QueryComponent, ToolRunnerComponent, ) from llama_index.core.llms import MessageRole from typing import Dict, Any, Optional, Tuple, List, cast def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]: """Agent input function. Returns: A Dictionary of output keys and values. If you are specifying src_key when defining links between this component and other components, make sure the src_key matches the specified output_key. 
""" if "current_reasoning" not in state: state["current_reasoning"] = [] reasoning_step = ObservationReasoningStep(observation=task.input) state["current_reasoning"].append(reasoning_step) return {"input": task.input} agent_input_component = AgentInputComponent(fn=agent_input_fn) from llama_index.core.agent import ReActChatFormatter from llama_index.core.query_pipeline import InputComponent, Link from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool def react_prompt_fn( task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool] ) -> List[ChatMessage]: chat_formatter = ReActChatFormatter() return chat_formatter.format( tools, chat_history=task.memory.get() + state["memory"].get_all(), current_reasoning=state["current_reasoning"], ) react_prompt_component = AgentFnComponent( fn=react_prompt_fn, partial_dict={"tools": [sql_tool]} ) from typing import Set, Optional from llama_index.core.agent.react.output_parser import ReActOutputParser from llama_index.core.llms import ChatResponse from llama_index.core.agent.types import Task def parse_react_output_fn( task: Task, state: Dict[str, Any], chat_response: ChatResponse ): """Parse ReAct output into a reasoning step.""" output_parser = ReActOutputParser() reasoning_step = output_parser.parse(chat_response.message.content) return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step} parse_react_output = AgentFnComponent(fn=parse_react_output_fn) def run_tool_fn( task: Task, state: Dict[str, Any], reasoning_step: ActionReasoningStep ): """Run tool and process tool output.""" tool_runner_component = ToolRunnerComponent( [sql_tool], callback_manager=task.callback_manager ) tool_output = tool_runner_component.run_component( tool_name=reasoning_step.action, tool_input=reasoning_step.action_input, ) observation_step = ObservationReasoningStep(observation=str(tool_output)) state["current_reasoning"].append(observation_step) return {"response_str": observation_step.get_content(), "is_done": False} run_tool = AgentFnComponent(fn=run_tool_fn) def process_response_fn( task: Task, state: Dict[str, Any], response_step: ResponseReasoningStep ): """Process response.""" state["current_reasoning"].append(response_step) response_str = response_step.response state["memory"].put(ChatMessage(content=task.input, role=MessageRole.USER)) state["memory"].put( ChatMessage(content=response_str, role=MessageRole.ASSISTANT) ) return {"response_str": response_str, "is_done": True} process_response = AgentFnComponent(fn=process_response_fn) def process_agent_response_fn( task: Task, state: Dict[str, Any], response_dict: dict ): """Process agent response.""" return ( AgentChatResponse(response_dict["response_str"]), response_dict["is_done"], ) process_agent_response = AgentFnComponent(fn=process_agent_response_fn) from llama_index.core.query_pipeline import QueryPipeline as QP from llama_index.llms.openai import OpenAI qp.add_modules( { "agent_input": agent_input_component, "react_prompt": react_prompt_component, "llm": OpenAI(model="gpt-4-1106-preview"), "react_output_parser": parse_react_output, "run_tool": run_tool, "process_response": process_response, "process_agent_response": process_agent_response, } ) qp.add_chain(["agent_input", "react_prompt", "llm", "react_output_parser"]) qp.add_link( "react_output_parser", "run_tool", condition_fn=lambda x: not x["done"], input_fn=lambda x: x["reasoning_step"], ) qp.add_link( "react_output_parser", "process_response", condition_fn=lambda x: x["done"], input_fn=lambda x: 
x["reasoning_step"], ) qp.add_link("process_response", "process_agent_response") qp.add_link("run_tool", "process_agent_response") from pyvis.network import Network net = Network(notebook=True, cdn_resources="in_line", directed=True) net.from_nx(qp.clean_dag) net.show("agent_dag.html") from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner from llama_index.core.callbacks import CallbackManager agent_worker = QueryPipelineAgentWorker(qp) agent = AgentRunner( agent_worker, callback_manager=CallbackManager([]), verbose=True ) task = agent.create_task( "What are some tracks from the artist AC/DC? Limit it to 3" ) step_output = agent.run_step(task.task_id) step_output = agent.run_step(task.task_id) step_output.is_last response = agent.finalize_response(task.task_id) print(str(response)) agent.reset() response = agent.chat( "What are some tracks from the artist AC/DC? Limit it to 3" ) print(str(response)) from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-4-1106-preview") from llama_index.core.agent import Task, AgentChatResponse from typing import Dict, Any from llama_index.core.query_pipeline import ( AgentInputComponent, AgentFnComponent, ) def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict: """Agent input function.""" if "convo_history" not in state: state["convo_history"] = [] state["count"] = 0 state["convo_history"].append(f"User: {task.input}") convo_history_str = "\n".join(state["convo_history"]) or "None" return {"input": task.input, "convo_history": convo_history_str} agent_input_component = AgentInputComponent(fn=agent_input_fn) from llama_index.core import PromptTemplate retry_prompt_str = """\ You are trying to generate a proper natural language query given a user input. This query will then be interpreted by a downstream text-to-SQL agent which will convert the query to a SQL statement. If the agent triggers an error, then that will be reflected in the current conversation history (see below). If the conversation history is None, use the user input. If its not None, generate a new SQL query that avoids the problems of the previous SQL query. Input: {input} Convo history (failed attempts): {convo_history} New input: """ retry_prompt = PromptTemplate(retry_prompt_str) from llama_index.core import Response from typing import Tuple validate_prompt_str = """\ Given the user query, validate whether the inferred SQL query and response from executing the query is correct and answers the query. Answer with YES or NO. 
Query: {input} Inferred SQL query: {sql_query} SQL Response: {sql_response} Result: """ validate_prompt = PromptTemplate(validate_prompt_str) MAX_ITER = 3 def agent_output_fn( task: Task, state: Dict[str, Any], output: Response ) -> Tuple[AgentChatResponse, bool]: """Agent output component.""" print(f"> Inferred SQL Query: {output.metadata['sql_query']}") print(f"> SQL Response: {str(output)}") state["convo_history"].append( f"Assistant (inferred SQL query): {output.metadata['sql_query']}" ) state["convo_history"].append(f"Assistant (response): {str(output)}") validate_prompt_partial = validate_prompt.as_query_component( partial={ "sql_query": output.metadata["sql_query"], "sql_response": str(output), } ) qp = QP(chain=[validate_prompt_partial, llm]) validate_output = qp.run(input=task.input) state["count"] += 1 is_done = False if state["count"] >= MAX_ITER: is_done = True if "YES" in validate_output.message.content: is_done = True return AgentChatResponse(response=str(output)), is_done agent_output_component = AgentFnComponent(fn=agent_output_fn) from llama_index.core.query_pipeline import ( QueryPipeline as QP, Link, InputComponent, ) qp = QP( modules={ "input": agent_input_component, "retry_prompt": retry_prompt, "llm": llm, "sql_query_engine": sql_query_engine, "output_component": agent_output_component, }, verbose=True, ) qp.add_link("input", "retry_prompt", src_key="input", dest_key="input") qp.add_link( "input", "retry_prompt", src_key="convo_history", dest_key="convo_history" ) qp.add_chain(["retry_prompt", "llm", "sql_query_engine", "output_component"]) from pyvis.network import Network net = Network(notebook=True, cdn_resources="in_line", directed=True) net.from_nx(qp.dag) net.show("agent_dag.html") from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner from llama_index.core.callbacks import CallbackManager agent_worker =
QueryPipelineAgentWorker(qp)
llama_index.core.agent.QueryPipelineAgentWorker
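The recorded completion plugs the retry pipeline into an agent worker; a sketch of the surrounding cell, mirroring the full ReAct row earlier in this file:

from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner
from llama_index.core.callbacks import CallbackManager

agent_worker = QueryPipelineAgentWorker(qp)  # qp: the pipeline built above
agent = AgentRunner(
    agent_worker, callback_manager=CallbackManager([]), verbose=True
)
# response = agent.chat("What are some tracks from the artist AC/DC? Limit it to 3")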
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-redis') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface') get_ipython().run_line_magic('pip', 'install llama-index-readers-google') get_ipython().system('docker run -d --name redis-stack -p 6379:6379 -p 8001:8001 redis/redis-stack:latest') import os os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.embeddings.huggingface import HuggingFaceEmbedding from llama_index.core.ingestion import ( DocstoreStrategy, IngestionPipeline, IngestionCache, ) from llama_index.core.ingestion.cache import RedisCache from llama_index.storage.docstore.redis import RedisDocumentStore from llama_index.core.node_parser import SentenceSplitter from llama_index.vector_stores.redis import RedisVectorStore vector_store = RedisVectorStore( index_name="redis_vector_store", index_prefix="vectore_store", redis_url="redis://localhost:6379", ) cache = IngestionCache( cache=
RedisCache.from_host_and_port("localhost", 6379)
llama_index.core.ingestion.cache.RedisCache.from_host_and_port
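A sketch of the finished cache construction; the host and port come from the docker command in the prompt, while the collection name is an assumption:

from llama_index.core.ingestion import IngestionCache
from llama_index.core.ingestion.cache import RedisCache

cache = IngestionCache(
    cache=RedisCache.from_host_and_port("localhost", 6379),  # the recorded completion
    collection="redis_cache",  # collection name is an assumption
)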
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import os from getpass import getpass if os.getenv("OPENAI_API_KEY") is None: os.environ["OPENAI_API_KEY"] = getpass( "Paste your OpenAI key from:" " https://platform.openai.com/account/api-keys\n" ) assert os.getenv("OPENAI_API_KEY", "").startswith( "sk-" ), "This doesn't look like a valid OpenAI API key" print("OpenAI API key configured") import os from getpass import getpass if os.getenv("HONEYHIVE_API_KEY") is None: os.environ["HONEYHIVE_API_KEY"] = getpass( "Paste your HoneyHive key from:" " https://app.honeyhive.ai/settings/account\n" ) print("HoneyHive API key configured") get_ipython().system('pip install llama-index') from llama_index.core.callbacks import CallbackManager from llama_index.core.callbacks import LlamaDebugHandler from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, SimpleKeywordTableIndex, StorageContext, ) from llama_index.core import ComposableGraph from llama_index.llms.openai import OpenAI from honeyhive.utils.llamaindex_tracer import HoneyHiveLlamaIndexTracer from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-4", temperature=0) import llama_index.core from llama_index.core import set_global_handler set_global_handler( "honeyhive", project="My LlamaIndex Project", name="My LlamaIndex Pipeline", api_key=os.environ["HONEYHIVE_API_KEY"], ) hh_tracer = llama_index.core.global_handler llama_debug =
LlamaDebugHandler(print_trace_on_end=True)
llama_index.core.callbacks.LlamaDebugHandler
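A sketch of how the debug handler plausibly joins the HoneyHive tracer in one CallbackManager (the pairing is an assumption based on the imports in the prompt):

from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler

llama_debug = LlamaDebugHandler(print_trace_on_end=True)  # the recorded completion
callback_manager = CallbackManager([llama_debug, hh_tracer])  # pairing is an assumption
Settings.callback_manager = callback_manager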
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere') get_ipython().system('pip install llama-index') from llama_index.llms.cohere import Cohere api_key = "Your api key" resp = Cohere(api_key=api_key).complete("Paul Graham is ") print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.cohere import Cohere messages = [ ChatMessage(role="user", content="hello there"), ChatMessage( role="assistant", content="Arrrr, matey! How can I help ye today?" ), ChatMessage(role="user", content="What is your name"), ] resp =
Cohere(api_key=api_key)
llama_index.llms.cohere.Cohere
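The completion constructs the client; chaining .chat(messages) onto it matches the message list built in the prompt (a sketch):

resp = Cohere(api_key=api_key).chat(messages)  # recorded completion plus the likely chained call
print(resp)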
from llama_index import VectorStoreIndex, SimpleDirectoryReader documents = SimpleDirectoryReader( "../../examples/data/paul_graham" ).load_data() index =
VectorStoreIndex.from_documents(documents)
llama_index.VectorStoreIndex.from_documents
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') import nest_asyncio nest_asyncio.apply() get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') get_ipython().system('pip install llama_hub') from pathlib import Path from llama_index.readers.file import PDFReader from llama_index.readers.file import UnstructuredReader from llama_index.readers.file import PyMuPDFReader loader =
PDFReader()
llama_index.readers.file.PDFReader
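A sketch of using the recorded PDFReader completion to load the file fetched in the prompt:

from pathlib import Path
from llama_index.readers.file import PDFReader

loader = PDFReader()  # the recorded completion
docs0 = loader.load_data(file=Path("./data/llama2.pdf"))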
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-postgres') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-huggingface') get_ipython().run_line_magic('pip', 'install llama-index-llms-llama-cpp') from llama_index.embeddings.huggingface import HuggingFaceEmbedding embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en") get_ipython().system('pip install llama-cpp-python') from llama_index.llms.llama_cpp import LlamaCPP model_url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF/resolve/main/llama-2-13b-chat.Q4_0.gguf" llm = LlamaCPP( model_url=model_url, model_path=None, temperature=0.1, max_new_tokens=256, context_window=3900, generate_kwargs={}, model_kwargs={"n_gpu_layers": 1}, verbose=True, ) get_ipython().system('pip install psycopg2-binary pgvector asyncpg "sqlalchemy[asyncio]" greenlet') import psycopg2 db_name = "vector_db" host = "localhost" password = "password" port = "5432" user = "jerry" conn = psycopg2.connect( dbname="postgres", host=host, password=password, port=port, user=user, ) conn.autocommit = True with conn.cursor() as c: c.execute(f"DROP DATABASE IF EXISTS {db_name}") c.execute(f"CREATE DATABASE {db_name}") from sqlalchemy import make_url from llama_index.vector_stores.postgres import PGVectorStore vector_store = PGVectorStore.from_params( database=db_name, host=host, password=password, port=port, user=user, table_name="llama2_paper", embed_dim=384, # openai embedding dimension ) get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PyMuPDFReader loader = PyMuPDFReader() documents = loader.load(file_path="./data/llama2.pdf") from llama_index.core.node_parser import SentenceSplitter text_parser = SentenceSplitter( chunk_size=1024, ) text_chunks = [] doc_idxs = [] for doc_idx, doc in enumerate(documents): cur_text_chunks = text_parser.split_text(doc.text) text_chunks.extend(cur_text_chunks) doc_idxs.extend([doc_idx] * len(cur_text_chunks)) from llama_index.core.schema import TextNode nodes = [] for idx, text_chunk in enumerate(text_chunks): node = TextNode( text=text_chunk, ) src_doc = documents[doc_idxs[idx]] node.metadata = src_doc.metadata nodes.append(node) for node in nodes: node_embedding = embed_model.get_text_embedding( node.get_content(metadata_mode="all") ) node.embedding = node_embedding vector_store.add(nodes) query_str = "Can you tell me about the key concepts for safety finetuning" query_embedding = embed_model.get_query_embedding(query_str) from llama_index.core.vector_stores import VectorStoreQuery query_mode = "default" vector_store_query = VectorStoreQuery( query_embedding=query_embedding, similarity_top_k=2, mode=query_mode ) query_result = vector_store.query(vector_store_query) print(query_result.nodes[0].get_content()) from llama_index.core.schema import NodeWithScore from typing import Optional nodes_with_scores = [] for index, node in enumerate(query_result.nodes): score: Optional[float] = None if query_result.similarities is not None: score = query_result.similarities[index] nodes_with_scores.append(NodeWithScore(node=node, score=score)) from llama_index.core import QueryBundle from llama_index.core.retrievers import BaseRetriever from typing import Any, List class VectorDBRetriever(BaseRetriever): """Retriever over a postgres vector store.""" def __init__( self, 
vector_store: PGVectorStore, embed_model: Any, query_mode: str = "default", similarity_top_k: int = 2, ) -> None: """Init params.""" self._vector_store = vector_store self._embed_model = embed_model self._query_mode = query_mode self._similarity_top_k = similarity_top_k super().__init__() def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]: """Retrieve.""" query_embedding = embed_model.get_query_embedding( query_bundle.query_str ) vector_store_query = VectorStoreQuery( query_embedding=query_embedding, similarity_top_k=self._similarity_top_k, mode=self._query_mode, ) query_result = vector_store.query(vector_store_query) nodes_with_scores = [] for index, node in enumerate(query_result.nodes): score: Optional[float] = None if query_result.similarities is not None: score = query_result.similarities[index] nodes_with_scores.append(
NodeWithScore(node=node, score=score)
llama_index.core.schema.NodeWithScore
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') qa_prompt_str = ( "Context information is below.\n" "---------------------\n" "{context_str}\n" "---------------------\n" "Given the context information and not prior knowledge, " "answer the question: {query_str}\n" ) refine_prompt_str = ( "We have the opportunity to refine the original answer " "(only if needed) with some more context below.\n" "------------\n" "{context_msg}\n" "------------\n" "Given the new context, refine the original answer to better " "answer the question: {query_str}. " "If the context isn't useful, output the original answer again.\n" "Original Answer: {existing_answer}" ) from llama_index.core.llms import ChatMessage, MessageRole from llama_index.core import ChatPromptTemplate chat_text_qa_msgs = [ ChatMessage( role=MessageRole.SYSTEM, content=( "Always answer the question, even if the context isn't helpful." ), ), ChatMessage(role=MessageRole.USER, content=qa_prompt_str), ] text_qa_template = ChatPromptTemplate(chat_text_qa_msgs) chat_refine_msgs = [ ChatMessage( role=MessageRole.SYSTEM, content=( "Always answer the question, even if the context isn't helpful." ), ), ChatMessage(role=MessageRole.USER, content=refine_prompt_str), ] refine_template = ChatPromptTemplate(chat_refine_msgs) from llama_index.core import ChatPromptTemplate chat_text_qa_msgs = [ ( "system", "Always answer the question, even if the context isn't helpful.", ), ("user", qa_prompt_str), ] text_qa_template =
ChatPromptTemplate.from_messages(chat_text_qa_msgs)
llama_index.core.ChatPromptTemplate.from_messages
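The same from_messages shorthand presumably applies to the refine template in the next cell; a sketch mirroring the message-object version in the prompt:

chat_refine_msgs = [
    (
        "system",
        "Always answer the question, even if the context isn't helpful.",
    ),
    ("user", refine_prompt_str),
]
refine_template = ChatPromptTemplate.from_messages(chat_refine_msgs)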
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks') get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface') import nest_asyncio nest_asyncio.apply() import os HUGGING_FACE_TOKEN = os.getenv("HUGGING_FACE_TOKEN") OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") import pandas as pd def display_eval_df(question, source, answer_a, answer_b, result) -> None: """Pretty print question/answer + gpt-4 judgement dataset.""" eval_df = pd.DataFrame( { "Question": question, "Source": source, "Model A": answer_a["model"], "Answer A": answer_a["text"], "Model B": answer_b["model"], "Answer B": answer_b["text"], "Score": result.score, "Judgement": result.feedback, }, index=[0], ) eval_df = eval_df.style.set_properties( **{ "inline-size": "300px", "overflow-wrap": "break-word", }, subset=["Answer A", "Answer B"] ) display(eval_df) get_ipython().system('pip install wikipedia -q') from llama_index.readers.wikipedia import WikipediaReader train_cities = [ "San Francisco", "Toronto", "New York", "Vancouver", "Montreal", "Boston", ] test_cities = [ "Tokyo", "Singapore", "Paris", ] train_documents = WikipediaReader().load_data( pages=[f"History of {x}" for x in train_cities] ) test_documents = WikipediaReader().load_data( pages=[f"History of {x}" for x in test_cities] ) QUESTION_GEN_PROMPT = ( "You are a Teacher/ Professor. Your task is to setup " "a quiz/examination. Using the provided context, formulate " "a single question that captures an important fact from the " "context. Restrict the question to the context information provided." 
) from llama_index.core.evaluation import DatasetGenerator from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3) train_dataset_generator = DatasetGenerator.from_documents( train_documents, question_gen_query=QUESTION_GEN_PROMPT, llm=llm, show_progress=True, num_questions_per_chunk=25, ) test_dataset_generator = DatasetGenerator.from_documents( test_documents, question_gen_query=QUESTION_GEN_PROMPT, llm=llm, show_progress=True, num_questions_per_chunk=25, ) train_questions = train_dataset_generator.generate_questions_from_nodes( num=200 ) test_questions = test_dataset_generator.generate_questions_from_nodes(num=150) len(train_questions), len(test_questions) train_questions[:3] test_questions[:3] from llama_index.core import VectorStoreIndex from llama_index.core.retrievers import VectorIndexRetriever train_index = VectorStoreIndex.from_documents(documents=train_documents) train_retriever = VectorIndexRetriever( index=train_index, similarity_top_k=2, ) test_index = VectorStoreIndex.from_documents(documents=test_documents) test_retriever = VectorIndexRetriever( index=test_index, similarity_top_k=2, ) from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.llms.huggingface import HuggingFaceInferenceAPI def create_query_engine( hf_name: str, retriever: VectorIndexRetriever, hf_llm_generators: dict ) -> RetrieverQueryEngine: """Create a RetrieverQueryEngine using the HuggingFaceInferenceAPI LLM""" if hf_name not in hf_llm_generators: raise KeyError("model not listed in hf_llm_generators") llm = HuggingFaceInferenceAPI( model_name=hf_llm_generators[hf_name], context_window=2048, # to use refine token=HUGGING_FACE_TOKEN, ) return RetrieverQueryEngine.from_args(retriever=retriever, llm=llm) hf_llm_generators = { "mistral-7b-instruct": "mistralai/Mistral-7B-Instruct-v0.1", "llama2-7b-chat": "meta-llama/Llama-2-7b-chat-hf", } train_query_engines = { mdl: create_query_engine(mdl, train_retriever, hf_llm_generators) for mdl in hf_llm_generators.keys() } test_query_engines = { mdl: create_query_engine(mdl, test_retriever, hf_llm_generators) for mdl in hf_llm_generators.keys() } import tqdm import random train_dataset = [] for q in tqdm.tqdm(train_questions): model_versus = random.sample(list(train_query_engines.items()), 2) data_entry = {"question": q} responses = [] source = None for name, engine in model_versus: response = engine.query(q) response_struct = {} response_struct["model"] = name response_struct["text"] = str(response) if source is not None: assert source == response.source_nodes[0].node.text[:1000] + "..." else: source = response.source_nodes[0].node.text[:1000] + "..." responses.append(response_struct) data_entry["answers"] = responses data_entry["source"] = source train_dataset.append(data_entry) from llama_index.llms.openai import OpenAI from llama_index.finetuning.callbacks import OpenAIFineTuningHandler from llama_index.core.callbacks import CallbackManager from llama_index.core.evaluation import PairwiseComparisonEvaluator from llama_index.core import Settings main_finetuning_handler = OpenAIFineTuningHandler() callback_manager = CallbackManager([main_finetuning_handler]) Settings.callback_manager = callback_manager llm_4 = OpenAI(temperature=0, model="gpt-4", callback_manager=callback_manager) gpt4_judge =
PairwiseComparisonEvaluator(llm=llm_4)
llama_index.core.evaluation.PairwiseComparisonEvaluator
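A sketch of invoking the judge over one collected entry; the keyword arguments follow PairwiseComparisonEvaluator.aevaluate, while the per-entry loop is an assumption (top-level await works because nest_asyncio is applied in the prompt):

result = await gpt4_judge.aevaluate(
    query=data_entry["question"],
    response=data_entry["answers"][0]["text"],
    second_response=data_entry["answers"][1]["text"],
    reference=data_entry["source"],
)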
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import os os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.2) Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader("../data/paul_graham").load_data() from llama_index.core import Settings Settings.chunk_size = 1024 nodes = Settings.node_parser.get_nodes_from_documents(documents) from llama_index.core import StorageContext storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) from llama_index.core import SummaryIndex from llama_index.core import VectorStoreIndex summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index = VectorStoreIndex(nodes, storage_context=storage_context) list_query_engine = summary_index.as_query_engine( response_mode="tree_summarize", use_async=True, ) vector_query_engine = vector_index.as_query_engine() from llama_index.core.tools import QueryEngineTool list_tool = QueryEngineTool.from_defaults( query_engine=list_query_engine, description=( "Useful for summarization questions related to Paul Graham essay on" " What I Worked On." ), ) vector_tool = QueryEngineTool.from_defaults( query_engine=vector_query_engine, description=( "Useful for retrieving specific context from Paul Graham essay on What" " I Worked On." ), ) from llama_index.core.query_engine import RouterQueryEngine from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector from llama_index.core.selectors import ( PydanticMultiSelector, PydanticSingleSelector, ) query_engine = RouterQueryEngine( selector=
PydanticSingleSelector.from_defaults()
llama_index.core.selectors.PydanticSingleSelector.from_defaults
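Completing the router with the recorded selector and the two tools defined in the prompt (a sketch of the standard RouterQueryEngine pattern):

query_engine = RouterQueryEngine(
    selector=PydanticSingleSelector.from_defaults(),  # the recorded completion
    query_engine_tools=[list_tool, vector_tool],
)
# response = query_engine.query("What is the summary of the document?")  # example query is hypothetical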
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.core.llama_dataset import download_llama_dataset rag_dataset, documents = download_llama_dataset( "PaulGrahamEssayDataset", "./paul_graham" ) rag_dataset.to_pandas()[:5] from llama_index.core import VectorStoreIndex index = VectorStoreIndex.from_documents(documents=documents) query_engine = index.as_query_engine() import nest_asyncio nest_asyncio.apply() prediction_dataset = await rag_dataset.amake_predictions_with( query_engine=query_engine, show_progress=True ) prediction_dataset.to_pandas()[:5] import tqdm from llama_index.llms.openai import OpenAI from llama_index.core.evaluation import ( CorrectnessEvaluator, FaithfulnessEvaluator, RelevancyEvaluator, SemanticSimilarityEvaluator, ) judges = {} judges["correctness"] = CorrectnessEvaluator( llm=OpenAI(temperature=0, model="gpt-4"), ) judges["relevancy"] = RelevancyEvaluator( llm=OpenAI(temperature=0, model="gpt-4"), ) judges["faithfulness"] = FaithfulnessEvaluator( llm=OpenAI(temperature=0, model="gpt-4"), ) judges["semantic_similarity"] =
SemanticSimilarityEvaluator()
llama_index.core.evaluation.SemanticSimilarityEvaluator
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') get_ipython().run_line_magic('env', 'OPENAI_API_KEY=YOUR_OPENAI_KEY') get_ipython().system('pip install llama-index pypdf') get_ipython().system("mkdir -p 'data/'") get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PDFReader from llama_index.core.response.notebook_utils import display_source_node from llama_index.core.retrievers import RecursiveRetriever from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI import json loader = PDFReader() docs0 = loader.load_data(file=Path("./data/llama2.pdf")) from llama_index.core import Document doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode node_parser = SentenceSplitter(chunk_size=1024) base_nodes = node_parser.get_nodes_from_documents(docs) for idx, node in enumerate(base_nodes): node.id_ = f"node-{idx}" from llama_index.core.embeddings import resolve_embed_model embed_model =
resolve_embed_model("local:BAAI/bge-small-en")
llama_index.core.embeddings.resolve_embed_model
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.agent.openai import OpenAIAgent from llama_index.llms.openai import OpenAI from llama_index.core.tools import BaseTool, FunctionTool def multiply(a: int, b: int) -> int: """Multiply two integers and return the result integer""" return a * b multiply_tool = FunctionTool.from_defaults(fn=multiply) def add(a: int, b: int) -> int: """Add two integers and return the result integer""" return a + b add_tool = FunctionTool.from_defaults(fn=add) llm = OpenAI(model="gpt-3.5-turbo-1106") agent = OpenAIAgent.from_tools( [multiply_tool, add_tool], llm=llm, verbose=True ) response = agent.chat("What is (121 * 3) + 42?") print(str(response)) response = agent.stream_chat("What is (121 * 3) + 42?") import nest_asyncio nest_asyncio.apply() response = await agent.achat("What is (121 * 3) + 42?") print(str(response)) response = await agent.astream_chat("What is (121 * 3) + 42?") response_gen = response.response_gen async for token in response.async_response_gen(): print(token, end="") import json def get_current_weather(location, unit="fahrenheit"): """Get the current weather in a given location""" if "tokyo" in location.lower(): return json.dumps( {"location": location, "temperature": "10", "unit": "celsius"} ) elif "san francisco" in location.lower(): return json.dumps( {"location": location, "temperature": "72", "unit": "fahrenheit"} ) else: return json.dumps( {"location": location, "temperature": "22", "unit": "celsius"} ) weather_tool = FunctionTool.from_defaults(fn=get_current_weather) llm =
OpenAI(model="gpt-3.5-turbo-1106")
llama_index.llms.openai.OpenAI
import os from PIL import Image from IPython.display import display from llama_index.tools.openai.image_generation import OpenAIImageGenerationToolSpec from llama_index.agent import ReActAgent from llama_index.tools import FunctionTool def show_image(filename: str) -> Image: """Display an image based on the filename""" img = Image.open(filename) return display(img) image_generation_tool = OpenAIImageGenerationToolSpec( api_key=os.environ["OPENAI_API_KEY"] ) show_image_tool =
FunctionTool.from_defaults(fn=show_image)
llama_index.tools.FunctionTool.from_defaults
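A sketch of wiring both tools into an agent; to_tool_list() is the standard ToolSpec conversion, and the choice of ReActAgent (imported in the prompt) is an assumption:

agent = ReActAgent.from_tools(
    [*image_generation_tool.to_tool_list(), show_image_tool],
    verbose=True,
)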
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().system('pip install llama-index') import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import VectorStoreIndex from llama_index.core import PromptTemplate from IPython.display import Markdown, display get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PyMuPDFReader loader = PyMuPDFReader() documents = loader.load(file_path="./data/llama2.pdf") from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI gpt35_llm = OpenAI(model="gpt-3.5-turbo") gpt4_llm = OpenAI(model="gpt-4") index = VectorStoreIndex.from_documents(documents) query_str = "What are the potential risks associated with the use of Llama 2 as mentioned in the context?" query_engine = index.as_query_engine(similarity_top_k=2, llm=gpt35_llm) vector_retriever = index.as_retriever(similarity_top_k=2) response = query_engine.query(query_str) print(str(response)) def display_prompt_dict(prompts_dict): for k, p in prompts_dict.items(): text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>" display(Markdown(text_md)) print(p.get_template()) display(Markdown("<br><br>")) prompts_dict = query_engine.get_prompts() display_prompt_dict(prompts_dict) from langchain import hub langchain_prompt = hub.pull("rlm/rag-prompt") from llama_index.core.prompts import LangchainPromptTemplate lc_prompt_tmpl = LangchainPromptTemplate( template=langchain_prompt, template_var_mappings={"query_str": "question", "context_str": "context"}, ) query_engine.update_prompts( {"response_synthesizer:text_qa_template": lc_prompt_tmpl} ) prompts_dict = query_engine.get_prompts() display_prompt_dict(prompts_dict) response = query_engine.query(query_str) print(str(response)) from llama_index.core.schema import TextNode few_shot_nodes = [] for line in open("../llama2_qa_citation_events.jsonl", "r"): few_shot_nodes.append(TextNode(text=line)) few_shot_index = VectorStoreIndex(few_shot_nodes) few_shot_retriever = few_shot_index.as_retriever(similarity_top_k=2) import json def few_shot_examples_fn(**kwargs): query_str = kwargs["query_str"] retrieved_nodes = few_shot_retriever.retrieve(query_str) result_strs = [] for n in retrieved_nodes: raw_dict = json.loads(n.get_content()) query = raw_dict["query"] response_dict = json.loads(raw_dict["response"]) result_str = f"""\ Query: {query} Response: {response_dict}""" result_strs.append(result_str) return "\n\n".join(result_strs) qa_prompt_tmpl_str = """\ Context information is below. --------------------- {context_str} --------------------- Given the context information and not prior knowledge, \ answer the query asking about citations over different topics. Please provide your answer in the form of a structured JSON format containing \ a list of authors as the citations. Some examples are given below. {few_shot_examples} Query: {query_str} Answer: \ """ qa_prompt_tmpl = PromptTemplate( qa_prompt_tmpl_str, function_mappings={"few_shot_examples": few_shot_examples_fn}, ) citation_query_str = ( "Which citations are mentioned in the section on Safety RLHF?" 
) print( qa_prompt_tmpl.format( query_str=citation_query_str, context_str="test_context" ) ) query_engine.update_prompts( {"response_synthesizer:text_qa_template": qa_prompt_tmpl} ) display_prompt_dict(query_engine.get_prompts()) response = query_engine.query(citation_query_str) print(str(response)) print(response.source_nodes[1].get_content()) from llama_index.core.postprocessor import ( NERPIINodePostprocessor, SentenceEmbeddingOptimizer, ) from llama_index.core import QueryBundle from llama_index.core.schema import NodeWithScore, TextNode pii_processor = NERPIINodePostprocessor(llm=gpt4_llm) def filter_pii_fn(**kwargs): query_bundle =
QueryBundle(query_str=kwargs["query_str"])
llama_index.core.QueryBundle
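A sketch of how filter_pii_fn plausibly finishes: wrap the retrieved context in a TextNode, run the PII postprocessor, and return the scrubbed text (the context_str kwarg and the wrapping step are assumptions):

def filter_pii_fn(**kwargs):
    query_bundle = QueryBundle(query_str=kwargs["query_str"])  # the recorded completion
    txt_nodes = [TextNode(text=kwargs["context_str"])]  # context_str kwarg is an assumption
    txt_nodes = pii_processor.postprocess_nodes(txt_nodes, query_bundle)
    return "\n\n".join([n.get_content() for n in txt_nodes])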
get_ipython().run_line_magic('pip', 'install llama-index-readers-milvus') get_ipython().system('pip install llama-index') import logging import sys import random from llama_index.core import SimpleDirectoryReader, Document from llama_index.readers.milvus import MilvusReader from IPython.display import Markdown, display import textwrap import os os.environ["OPENAI_API_KEY"] = "sk-" reader =
MilvusReader()
llama_index.readers.milvus.MilvusReader
get_ipython().system('pip install llama-index-postprocessor-jinaai-rerank') get_ipython().system('pip install llama-index-embeddings-jinaai') get_ipython().system('pip install llama-index') import os from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, ) from llama_index.embeddings.jinaai import JinaEmbedding api_key = os.environ["JINA_API_KEY"] jina_embeddings =
JinaEmbedding(api_key=api_key)
llama_index.embeddings.jinaai.JinaEmbedding
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google') get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google') get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google') get_ipython().run_line_magic('pip', 'install llama-index') get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"') get_ipython().run_line_magic('pip', 'install google-auth-oauthlib') from google.oauth2 import service_account from llama_index.vector_stores.google import set_google_config credentials = service_account.Credentials.from_service_account_file( "service_account_key.json", scopes=[ "https://www.googleapis.com/auth/generative-language.retriever", ], ) set_google_config(auth_credentials=credentials) get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix from typing import Iterable from random import randrange LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX = f"llama-index-colab" SESSION_CORPUS_ID_PREFIX = ( f"{LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX}-{randrange(1000000)}" ) def corpus_id(num_id: int) -> str: return f"{SESSION_CORPUS_ID_PREFIX}-{num_id}" SESSION_CORPUS_ID = corpus_id(1) def list_corpora() -> Iterable[genaix.Corpus]: client = genaix.build_semantic_retriever() yield from genaix.list_corpora(client=client) def delete_corpus(*, corpus_id: str) -> None: client = genaix.build_semantic_retriever() genaix.delete_corpus(corpus_id=corpus_id, client=client) def cleanup_colab_corpora(): for corpus in list_corpora(): if corpus.corpus_id.startswith(LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX): try: delete_corpus(corpus_id=corpus.corpus_id) print(f"Deleted corpus {corpus.corpus_id}.") except Exception: pass cleanup_colab_corpora() from llama_index.core import SimpleDirectoryReader from llama_index.indices.managed.google import GoogleIndex from llama_index.core import Response import time index = GoogleIndex.create_corpus( corpus_id=SESSION_CORPUS_ID, display_name="My first corpus!" ) print(f"Newly created corpus ID is {index.corpus_id}.") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() index.insert_documents(documents) for corpus in list_corpora(): print(corpus) query_engine = index.as_query_engine() response = query_engine.query("What did Paul Graham do growing up?") assert isinstance(response, Response) print(f"Response is {response.response}") for cited_text in [node.text for node in response.source_nodes]: print(f"Cited text: {cited_text}") if response.metadata: print( f"Answerability: {response.metadata.get('answerable_probability', 0)}" ) index =
GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID)
llama_index.indices.managed.google.GoogleIndex.from_corpus
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-bagel') from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.vector_stores.bagel import BagelVectorStore from llama_index.core import StorageContext from IPython.display import Markdown, display import bagel from bagel import Settings import os import getpass os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:") import openai openai.api_key = os.environ["OPENAI_API_KEY"] get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") server_settings = Settings( bagel_api_impl="rest", bagel_server_host="api.bageldb.ai" ) client = bagel.Client(server_settings) collection = client.get_or_create_cluster("testing_embeddings") embed_model = "local:BAAI/bge-small-en-v1.5" documents =
SimpleDirectoryReader("./data/paul_graham/")
llama_index.core.SimpleDirectoryReader
get_ipython().run_line_magic('pip', 'install llama-index-llms-litellm') get_ipython().system('pip install llama-index') import os from llama_index.llms.litellm import LiteLLM from llama_index.core.llms import ChatMessage os.environ["OPENAI_API_KEY"] = "your-api-key" os.environ["COHERE_API_KEY"] = "your-api-key" message = ChatMessage(role="user", content="Hey! how's it going?") llm = LiteLLM("gpt-3.5-turbo") chat_response = llm.chat([message]) llm =
LiteLLM("command-nightly")
llama_index.llms.litellm.LiteLLM
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore') get_ipython().run_line_magic('pip', 'install llama-index-storage-kvstore-firestore') get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-firestore') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex from llama_index.core import SummaryIndex from llama_index.core import ComposableGraph from llama_index.llms.openai import OpenAI from llama_index.core.response.notebook_utils import display_response from llama_index.core import Settings get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") reader = SimpleDirectoryReader("./data/paul_graham/") documents = reader.load_data() from llama_index.core.node_parser import SentenceSplitter nodes = SentenceSplitter().get_nodes_from_documents(documents) from llama_index.storage.kvstore.firestore import FirestoreKVStore from llama_index.storage.docstore.firestore import FirestoreDocumentStore from llama_index.storage.index_store.firestore import FirestoreIndexStore kvstore = FirestoreKVStore() storage_context = StorageContext.from_defaults( docstore=FirestoreDocumentStore(kvstore), index_store=FirestoreIndexStore(kvstore), ) storage_context.docstore.add_documents(nodes) summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index = VectorStoreIndex(nodes, storage_context=storage_context) keyword_table_index = SimpleKeywordTableIndex( nodes, storage_context=storage_context ) len(storage_context.docstore.docs) storage_context.persist() list_id = summary_index.index_id vector_id = vector_index.index_id keyword_id = keyword_table_index.index_id from llama_index.core import load_index_from_storage kvstore =
FirestoreKVStore()
llama_index.storage.kvstore.firestore.FirestoreKVStore
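A sketch of the reload path the final cell is heading toward: rebuild the storage context around the fresh KV store and load each index back by the IDs saved in the prompt:

storage_context = StorageContext.from_defaults(
    docstore=FirestoreDocumentStore(kvstore),
    index_store=FirestoreIndexStore(kvstore),
)
summary_index = load_index_from_storage(storage_context, index_id=list_id)
vector_index = load_index_from_storage(storage_context, index_id=vector_id)
keyword_table_index = load_index_from_storage(storage_context, index_id=keyword_id)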
get_ipython().system('pip install llama-index') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import SimpleDirectoryReader reader = SimpleDirectoryReader( input_files=["./data/paul_graham/paul_graham_essay.txt"] ) docs = reader.load_data() text = docs[0].text from llama_index.core.response_synthesizers import TreeSummarize summarizer =
TreeSummarize(verbose=True)
llama_index.core.response_synthesizers.TreeSummarize
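A sketch of driving the synthesizer: TreeSummarize.get_response takes a query string plus raw text chunks (the question is a placeholder):

response = summarizer.get_response("who is Paul Graham?", [text])
print(response)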
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.core.agent import ( CustomSimpleAgentWorker, Task, AgentChatResponse, ) from typing import Dict, Any, List, Tuple, Optional from llama_index.core.tools import BaseTool, QueryEngineTool from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser from llama_index.core.query_engine import RouterQueryEngine from llama_index.core import ChatPromptTemplate, PromptTemplate from llama_index.core.selectors import PydanticSingleSelector from llama_index.core.bridge.pydantic import Field, BaseModel from llama_index.core.llms import ChatMessage, MessageRole DEFAULT_PROMPT_STR = """ Given previous question/response pairs, please determine if an error has occurred in the response, and suggest \ a modified question that will not trigger the error. Examples of modified questions: - The question itself is modified to elicit a non-erroneous response - The question is augmented with context that will help the downstream system better answer the question. - The question is augmented with examples of negative responses, or other negative questions. An error means that either an exception has triggered, or the response is completely irrelevant to the question. Please return the evaluation of the response in the following JSON format. """ def get_chat_prompt_template( system_prompt: str, current_reasoning: Tuple[str, str] ) -> ChatPromptTemplate: system_msg = ChatMessage(role=MessageRole.SYSTEM, content=system_prompt) messages = [system_msg] for raw_msg in current_reasoning: if raw_msg[0] == "user": messages.append( ChatMessage(role=MessageRole.USER, content=raw_msg[1]) ) else: messages.append( ChatMessage(role=MessageRole.ASSISTANT, content=raw_msg[1]) ) return ChatPromptTemplate(message_templates=messages) class ResponseEval(BaseModel): """Evaluation of whether the response has an error.""" has_error: bool = Field( ..., description="Whether the response has an error." ) new_question: str = Field(..., description="The suggested new question.") explanation: str = Field( ..., description=( "The explanation for the error as well as for the new question." "Can include the direct stack trace as well." ), ) from llama_index.core.bridge.pydantic import PrivateAttr class RetryAgentWorker(CustomSimpleAgentWorker): """Agent worker that adds a retry layer on top of a router. Continues iterating until there's no errors / task is done. """ prompt_str: str = Field(default=DEFAULT_PROMPT_STR) max_iterations: int =
Field(default=10)
llama_index.core.bridge.pydantic.Field
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import SimpleDirectoryReader from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex from llama_index.core import SummaryIndex from llama_index.core import ComposableGraph from llama_index.llms.openai import OpenAI from llama_index.core import Settings get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") reader = SimpleDirectoryReader("./data/paul_graham/") documents = reader.load_data() from llama_index.core.node_parser import SentenceSplitter nodes = SentenceSplitter().get_nodes_from_documents(documents) from llama_index.core.storage.docstore import SimpleDocumentStore docstore =
SimpleDocumentStore()
llama_index.core.storage.docstore.SimpleDocumentStore
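The recorded completion creates an in-memory docstore; registering the parsed nodes is the natural next step (a sketch):

docstore = SimpleDocumentStore()  # the recorded completion
docstore.add_documents(nodes)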
import os os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.core.postprocessor import ( FixedRecencyPostprocessor, EmbeddingRecencyPostprocessor, ) from llama_index.core.node_parser import SentenceSplitter from llama_index.core.storage.docstore import SimpleDocumentStore from llama_index.core.response.notebook_utils import display_response from llama_index.core import StorageContext def get_file_metadata(file_name: str): """Get file metadata.""" if "v1" in file_name: return {"date": "2020-01-01"} elif "v2" in file_name: return {"date": "2020-02-03"} elif "v3" in file_name: return {"date": "2022-04-12"} else: raise ValueError("invalid file") documents = SimpleDirectoryReader( input_files=[ "test_versioned_data/paul_graham_essay_v1.txt", "test_versioned_data/paul_graham_essay_v2.txt", "test_versioned_data/paul_graham_essay_v3.txt", ], file_metadata=get_file_metadata, ).load_data() from llama_index.core import Settings Settings.text_splitter = SentenceSplitter(chunk_size=512) nodes = Settings.text_splitter.get_nodes_from_documents(documents) docstore = SimpleDocumentStore() docstore.add_documents(nodes) storage_context = StorageContext.from_defaults(docstore=docstore) print(documents[2].get_text()) index = VectorStoreIndex(nodes, storage_context=storage_context) node_postprocessor = FixedRecencyPostprocessor() node_postprocessor_emb =
EmbeddingRecencyPostprocessor()
llama_index.core.postprocessor.EmbeddingRecencyPostprocessor
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().system('pip install llama-index qdrant-client pypdf "transformers[torch]"') import os os.environ["OPENAI_API_KEY"] = "sk-..." get_ipython().system("mkdir -p 'data/'") get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader("./data/").load_data() from llama_index.core import VectorStoreIndex, StorageContext from llama_index.core import Settings from llama_index.vector_stores.qdrant import QdrantVectorStore from qdrant_client import QdrantClient client = QdrantClient(path="./qdrant_data") vector_store = QdrantVectorStore( "llama2_paper", client=client, enable_hybrid=True, batch_size=20 ) storage_context =
StorageContext.from_defaults(vector_store=vector_store)
llama_index.core.StorageContext.from_defaults
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.core.agent import ( CustomSimpleAgentWorker, Task, AgentChatResponse, ) from typing import Dict, Any, List, Tuple, Optional from llama_index.core.tools import BaseTool, QueryEngineTool from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser from llama_index.core.query_engine import RouterQueryEngine from llama_index.core import ChatPromptTemplate, PromptTemplate from llama_index.core.selectors import PydanticSingleSelector from llama_index.core.bridge.pydantic import Field, BaseModel from llama_index.core.llms import ChatMessage, MessageRole DEFAULT_PROMPT_STR = """ Given previous question/response pairs, please determine if an error has occurred in the response, and suggest \ a modified question that will not trigger the error. Examples of modified questions: - The question itself is modified to elicit a non-erroneous response - The question is augmented with context that will help the downstream system better answer the question. - The question is augmented with examples of negative responses, or other negative questions. An error means that either an exception has triggered, or the response is completely irrelevant to the question. Please return the evaluation of the response in the following JSON format. """ def get_chat_prompt_template( system_prompt: str, current_reasoning: Tuple[str, str] ) -> ChatPromptTemplate: system_msg = ChatMessage(role=MessageRole.SYSTEM, content=system_prompt) messages = [system_msg] for raw_msg in current_reasoning: if raw_msg[0] == "user": messages.append( ChatMessage(role=MessageRole.USER, content=raw_msg[1]) ) else: messages.append( ChatMessage(role=MessageRole.ASSISTANT, content=raw_msg[1]) ) return
ChatPromptTemplate(message_templates=messages)
llama_index.core.ChatPromptTemplate
from llama_index import VectorStoreIndex, SimpleDirectoryReader documents = SimpleDirectoryReader( "../../examples/data/paul_graham" ).load_data() index = VectorStoreIndex.from_documents(documents) import pinecone from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext from llama_index.vector_stores import PineconeVectorStore pinecone.init(api_key="<api_key>", environment="<environment>") pinecone.create_index( "quickstart", dimension=1536, metric="euclidean", pod_type="p1" ) storage_context = StorageContext.from_defaults( vector_store=PineconeVectorStore(pinecone.Index("quickstart")) ) documents = SimpleDirectoryReader( "../../examples/data/paul_graham" ).load_data() index = VectorStoreIndex.from_documents( documents, storage_context=storage_context ) vector_store = PineconeVectorStore(pinecone.Index("quickstart")) index = VectorStoreIndex.from_vector_store(vector_store=vector_store) query_engine = index.as_query_engine() response = query_engine.query("What did the author do growing up?") from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters query_engine = index.as_query_engine( similarity_top_k=3, vector_store_query_mode="default", filters=MetadataFilters( filters=[ ExactMatchFilter(key="name", value="paul graham"), ] ), alpha=None, doc_ids=None, ) response = query_engine.query("what did the author do growing up?") from llama_index import get_response_synthesizer from llama_index.indices.vector_store.retrievers import VectorIndexRetriever from llama_index.query_engine.retriever_query_engine import ( RetrieverQueryEngine, ) retriever = VectorIndexRetriever( index=index, similarity_top_k=3, vector_store_query_mode="default", filters=[ExactMatchFilter(key="name", value="paul graham")], alpha=None, doc_ids=None, ) query_engine = RetrieverQueryEngine( retriever=retriever, response_synthesizer=
get_response_synthesizer()
llama_index.get_response_synthesizer
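A quick smoke test of the manually assembled engine above; the query mirrors the earlier one in the same snippet:

# Run the retriever + synthesizer pipeline end to end.
response = query_engine.query("What did the author do growing up?")
print(str(response))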
from IPython.display import Image Image(filename="img/airbyte_1.png") Image(filename="img/github_1.png") Image(filename="img/github_2.png") Image(filename="img/snowflake_1.png") Image(filename="img/snowflake_2.png") Image(filename="img/airbyte_7.png") Image(filename="img/github_3.png") Image(filename="img/airbyte_9.png") Image(filename="img/airbyte_8.png") def snowflake_sqlalchemy_20_monkey_patches(): import sqlalchemy.util.compat sqlalchemy.util.compat.string_types = (str,) sqlalchemy.types.String.RETURNS_UNICODE = True import snowflake.sqlalchemy.snowdialect snowflake.sqlalchemy.snowdialect.SnowflakeDialect.returns_unicode_strings = ( True ) import snowflake.sqlalchemy.snowdialect def has_table(self, connection, table_name, schema=None, info_cache=None): """ Checks if the table exists """ return self._has_object(connection, "TABLE", table_name, schema) snowflake.sqlalchemy.snowdialect.SnowflakeDialect.has_table = has_table try: snowflake_sqlalchemy_20_monkey_patches() except Exception as e: raise ValueError("Please run `pip install snowflake-sqlalchemy`") snowflake_uri = "snowflake://<user_login_name>:<password>@<account_identifier>/<database_name>/<schema_name>?warehouse=<warehouse_name>&role=<role_name>" from sqlalchemy import select, create_engine, MetaData, Table engine = create_engine(snowflake_uri) metadata = MetaData(bind=None) table = Table("ZENDESK_TICKETS", metadata, autoload=True, autoload_with=engine) stmt = select(table.columns) with engine.connect() as connection: results = connection.execute(stmt).fetchone() print(results) print(results.keys()) from llama_index import SQLDatabase sql_database = SQLDatabase(engine) from llama_index.indices.struct_store.sql_query import NLSQLTableQueryEngine from IPython.display import Markdown, display query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["github_issues", "github_comments", "github_users"], ) query_str = "Which issues have the most comments? Give the top 10 and use a join on url." response = query_engine.query(query_str) display(Markdown(f"<b>{response}</b>")) query_engine = NLSQLTableQueryEngine( sql_database=sql_database, synthesize_response=False, tables=["github_issues", "github_comments", "github_users"], ) response = query_engine.query(query_str) display(Markdown(f"<b>{response}</b>")) sql_query = response.metadata["sql_query"] display(Markdown(f"<b>{sql_query}</b>")) from llama_index.indices.struct_store.sql_query import ( SQLTableRetrieverQueryEngine, ) from llama_index.objects import ( SQLTableNodeMapping, ObjectIndex, SQLTableSchema, ) from llama_index import VectorStoreIndex table_node_mapping =
SQLTableNodeMapping(sql_database)
llama_index.objects.SQLTableNodeMapping
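A plausible continuation of this row, mirroring the object-index pattern used in the other text-to-SQL snippets here; the table names are the ones queried above:

# One schema object per table, so only relevant schemas are retrieved per query.
table_schema_objs = [
    SQLTableSchema(table_name="github_issues"),
    SQLTableSchema(table_name="github_comments"),
    SQLTableSchema(table_name="github_users"),
]
obj_index = ObjectIndex.from_objects(
    table_schema_objs, table_node_mapping, VectorStoreIndex
)
query_engine = SQLTableRetrieverQueryEngine(
    sql_database, obj_index.as_retriever(similarity_top_k=1)
)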
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().system('pip install llama-index') import pinecone import os api_key = os.environ["PINECONE_API_KEY"] pinecone.init(api_key=api_key, environment="us-west1-gcp") pinecone.create_index( "quickstart", dimension=1536, metric="euclidean", pod_type="p1" ) pinecone_index = pinecone.Index("quickstart") pinecone_index.delete(deleteAll=True) from llama_index.vector_stores.pinecone import PineconeVectorStore vector_store = PineconeVectorStore(pinecone_index=pinecone_index) get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PyMuPDFReader loader = PyMuPDFReader() documents = loader.load(file_path="./data/llama2.pdf") from llama_index.core import VectorStoreIndex from llama_index.core.node_parser import SentenceSplitter from llama_index.core import StorageContext splitter = SentenceSplitter(chunk_size=1024) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents, transformations=[splitter], storage_context=storage_context ) query_str = "Can you tell me about the key concepts for safety finetuning" from llama_index.embeddings.openai import OpenAIEmbedding embed_model = OpenAIEmbedding() query_embedding = embed_model.get_query_embedding(query_str) from llama_index.core.vector_stores import VectorStoreQuery query_mode = "default" vector_store_query = VectorStoreQuery( query_embedding=query_embedding, similarity_top_k=2, mode=query_mode ) query_result = vector_store.query(vector_store_query) query_result from llama_index.core.schema import NodeWithScore from typing import Optional nodes_with_scores = [] for index, node in enumerate(query_result.nodes): score: Optional[float] = None if query_result.similarities is not None: score = query_result.similarities[index] nodes_with_scores.append(NodeWithScore(node=node, score=score)) from llama_index.core.response.notebook_utils import display_source_node for node in nodes_with_scores: display_source_node(node, source_length=1000) from llama_index.core import QueryBundle from llama_index.core.retrievers import BaseRetriever from typing import Any, List class PineconeRetriever(BaseRetriever): """Retriever over a pinecone vector store.""" def __init__( self, vector_store: PineconeVectorStore, embed_model: Any, query_mode: str = "default", similarity_top_k: int = 2, ) -> None: """Init params.""" self._vector_store = vector_store self._embed_model = embed_model self._query_mode = query_mode self._similarity_top_k = similarity_top_k super().__init__() def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]: """Retrieve.""" query_embedding = self._embed_model.get_query_embedding(query_bundle.query_str) vector_store_query = VectorStoreQuery( query_embedding=query_embedding, similarity_top_k=self._similarity_top_k, mode=self._query_mode, ) query_result = self._vector_store.query(vector_store_query) nodes_with_scores = [] for index, node in enumerate(query_result.nodes): score: Optional[float] = None if query_result.similarities is not None: score = query_result.similarities[index] nodes_with_scores.append(NodeWithScore(node=node, score=score)) return nodes_with_scores retriever = PineconeRetriever( vector_store,
embed_model, query_mode="default", similarity_top_k=2 ) retrieved_nodes = retriever.retrieve(query_str) for node in retrieved_nodes: display_source_node(node, source_length=1000) from llama_index.core.query_engine import RetrieverQueryEngine query_engine =
RetrieverQueryEngine.from_args(retriever)
llama_index.core.query_engine.RetrieverQueryEngine.from_args
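A quick end-to-end check of the from-scratch retriever, reusing query_str from the snippet above:

# Retrieve from Pinecone, then synthesize an answer.
response = query_engine.query(query_str)
print(str(response))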
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().handlers = [] logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, StorageContext, SimpleKeywordTableIndex, ) from llama_index.core import SummaryIndex from llama_index.core.node_parser import SentenceSplitter from llama_index.llms.openai import OpenAI get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() llm = OpenAI(model="gpt-4") splitter = SentenceSplitter(chunk_size=1024) nodes = splitter.get_nodes_from_documents(documents) storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index = VectorStoreIndex(nodes, storage_context=storage_context) keyword_index =
SimpleKeywordTableIndex(nodes, storage_context=storage_context)
llama_index.core.SimpleKeywordTableIndex
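A short sketch comparing the indexes built over the shared docstore; the query is illustrative:

# The same nodes, retrieved two different ways.
vector_response = vector_index.as_query_engine().query(
    "What did the author do after his time at Y Combinator?"
)
keyword_response = keyword_index.as_query_engine().query(
    "What did the author do after his time at Y Combinator?"
)
print(vector_response)
print(keyword_response)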
get_ipython().system('pip install llama-index') from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.core.postprocessor import ( PrevNextNodePostprocessor, AutoPrevNextNodePostprocessor, ) from llama_index.core.node_parser import SentenceSplitter from llama_index.core.storage.docstore import SimpleDocumentStore get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import StorageContext documents = SimpleDirectoryReader("./data/paul_graham").load_data() from llama_index.core import Settings Settings.chunk_size = 512 nodes = Settings.node_parser.get_nodes_from_documents(documents) docstore = SimpleDocumentStore() docstore.add_documents(nodes) storage_context = StorageContext.from_defaults(docstore=docstore) index = VectorStoreIndex(nodes, storage_context=storage_context) node_postprocessor =
PrevNextNodePostprocessor(docstore=docstore, num_nodes=4)
llama_index.core.postprocessor.PrevNextNodePostprocessor
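A usage sketch for the completed postprocessor; node_postprocessors is the standard hookup point:

# Fetch one chunk, then pull in up to 4 neighboring chunks from the docstore.
query_engine = index.as_query_engine(
    similarity_top_k=1,
    node_postprocessors=[node_postprocessor],
    response_mode="tree_summarize",
)
response = query_engine.query("What did the author do after Viaweb?")
print(response)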
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import os import openai os.environ["OPENAI_API_KEY"] = "sk-.." openai.api_key = os.environ["OPENAI_API_KEY"] from IPython.display import Markdown, display from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, ) engine = create_engine("sqlite:///:memory:") metadata_obj = MetaData() table_name = "city_stats" city_stats_table = Table( table_name, metadata_obj, Column("city_name", String(16), primary_key=True), Column("population", Integer), Column("country", String(16), nullable=False), ) metadata_obj.create_all(engine) from llama_index.core import SQLDatabase from llama_index.llms.openai import OpenAI llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo") sql_database = SQLDatabase(engine, include_tables=["city_stats"]) from sqlalchemy import insert rows = [ {"city_name": "Toronto", "population": 2930000, "country": "Canada"}, {"city_name": "Tokyo", "population": 13960000, "country": "Japan"}, { "city_name": "Chicago", "population": 2679000, "country": "United States", }, {"city_name": "Seoul", "population": 9776000, "country": "South Korea"}, ] for row in rows: stmt = insert(city_stats_table).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) stmt = select( city_stats_table.c.city_name, city_stats_table.c.population, city_stats_table.c.country, ).select_from(city_stats_table) with engine.connect() as connection: results = connection.execute(stmt).fetchall() print(results) from sqlalchemy import text with engine.connect() as con: rows = con.execute(text("SELECT city_name from city_stats")) for row in rows: print(row) from llama_index.core.query_engine import NLSQLTableQueryEngine query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["city_stats"], llm=llm ) query_str = "Which city has the highest population?" response = query_engine.query(query_str) display(Markdown(f"<b>{response}</b>")) from llama_index.core.indices.struct_store.sql_query import ( SQLTableRetrieverQueryEngine, ) from llama_index.core.objects import ( SQLTableNodeMapping, ObjectIndex, SQLTableSchema, ) from llama_index.core import VectorStoreIndex table_node_mapping = SQLTableNodeMapping(sql_database) table_schema_objs = [ (SQLTableSchema(table_name="city_stats")) ] # add a SQLTableSchema for each table obj_index = ObjectIndex.from_objects( table_schema_objs, table_node_mapping, VectorStoreIndex, ) query_engine = SQLTableRetrieverQueryEngine( sql_database, obj_index.as_retriever(similarity_top_k=1) ) response = query_engine.query("Which city has the highest population?") display(Markdown(f"<b>{response}</b>")) response.metadata["result"] city_stats_text = ( "This table gives information regarding the population and country of a" " given city.\nThe user will query with codewords, where 'foo' corresponds" " to population and 'bar' corresponds to city." ) table_node_mapping = SQLTableNodeMapping(sql_database) table_schema_objs = [ (
SQLTableSchema(table_name="city_stats", context_str=city_stats_text)
llama_index.core.objects.SQLTableSchema
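A plausible continuation once the open list above is closed: rebuild the object index and ask a codeword question that only the attached context_str makes answerable:

obj_index = ObjectIndex.from_objects(
    table_schema_objs, table_node_mapping, VectorStoreIndex
)
query_engine = SQLTableRetrieverQueryEngine(
    sql_database, obj_index.as_retriever(similarity_top_k=1)
)
# Per the context_str, 'foo' means population and 'bar' means city.
response = query_engine.query("Which bar has the highest foo?")
print(response)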
from llama_index.core import SQLDatabase from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("sqlite:///chinook.db") sql_database = SQLDatabase(engine) from llama_index.core.query_pipeline import QueryPipeline get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip') get_ipython().system('unzip ./chinook.zip') from llama_index.core.settings import Settings from llama_index.core.callbacks import CallbackManager callback_manager = CallbackManager() Settings.callback_manager = callback_manager import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") from llama_index.core.query_engine import NLSQLTableQueryEngine from llama_index.core.tools import QueryEngineTool sql_query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["albums", "tracks", "artists"], verbose=True, ) sql_tool = QueryEngineTool.from_defaults( query_engine=sql_query_engine, name="sql_tool", description=( "Useful for translating a natural language query into a SQL query" ), ) from llama_index.core.query_pipeline import QueryPipeline as QP qp = QP(verbose=True) from llama_index.core.agent.react.types import ( ActionReasoningStep, ObservationReasoningStep, ResponseReasoningStep, ) from llama_index.core.agent import Task, AgentChatResponse from llama_index.core.query_pipeline import ( AgentInputComponent, AgentFnComponent, CustomAgentComponent, QueryComponent, ToolRunnerComponent, ) from llama_index.core.llms import MessageRole from typing import Dict, Any, Optional, Tuple, List, cast def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]: """Agent input function. Returns: A Dictionary of output keys and values. If you are specifying src_key when defining links between this component and other components, make sure the src_key matches the specified output_key. 
""" if "current_reasoning" not in state: state["current_reasoning"] = [] reasoning_step = ObservationReasoningStep(observation=task.input) state["current_reasoning"].append(reasoning_step) return {"input": task.input} agent_input_component = AgentInputComponent(fn=agent_input_fn) from llama_index.core.agent import ReActChatFormatter from llama_index.core.query_pipeline import InputComponent, Link from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool def react_prompt_fn( task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool] ) -> List[ChatMessage]: chat_formatter = ReActChatFormatter() return chat_formatter.format( tools, chat_history=task.memory.get() + state["memory"].get_all(), current_reasoning=state["current_reasoning"], ) react_prompt_component = AgentFnComponent( fn=react_prompt_fn, partial_dict={"tools": [sql_tool]} ) from typing import Set, Optional from llama_index.core.agent.react.output_parser import ReActOutputParser from llama_index.core.llms import ChatResponse from llama_index.core.agent.types import Task def parse_react_output_fn( task: Task, state: Dict[str, Any], chat_response: ChatResponse ): """Parse ReAct output into a reasoning step.""" output_parser = ReActOutputParser() reasoning_step = output_parser.parse(chat_response.message.content) return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step} parse_react_output = AgentFnComponent(fn=parse_react_output_fn) def run_tool_fn( task: Task, state: Dict[str, Any], reasoning_step: ActionReasoningStep ): """Run tool and process tool output.""" tool_runner_component = ToolRunnerComponent( [sql_tool], callback_manager=task.callback_manager ) tool_output = tool_runner_component.run_component( tool_name=reasoning_step.action, tool_input=reasoning_step.action_input, ) observation_step = ObservationReasoningStep(observation=str(tool_output)) state["current_reasoning"].append(observation_step) return {"response_str": observation_step.get_content(), "is_done": False} run_tool = AgentFnComponent(fn=run_tool_fn) def process_response_fn( task: Task, state: Dict[str, Any], response_step: ResponseReasoningStep ): """Process response.""" state["current_reasoning"].append(response_step) response_str = response_step.response state["memory"].put(ChatMessage(content=task.input, role=MessageRole.USER)) state["memory"].put( ChatMessage(content=response_str, role=MessageRole.ASSISTANT) ) return {"response_str": response_str, "is_done": True} process_response = AgentFnComponent(fn=process_response_fn) def process_agent_response_fn( task: Task, state: Dict[str, Any], response_dict: dict ): """Process agent response.""" return ( AgentChatResponse(response_dict["response_str"]), response_dict["is_done"], ) process_agent_response = AgentFnComponent(fn=process_agent_response_fn) from llama_index.core.query_pipeline import QueryPipeline as QP from llama_index.llms.openai import OpenAI qp.add_modules( { "agent_input": agent_input_component, "react_prompt": react_prompt_component, "llm": OpenAI(model="gpt-4-1106-preview"), "react_output_parser": parse_react_output, "run_tool": run_tool, "process_response": process_response, "process_agent_response": process_agent_response, } ) qp.add_chain(["agent_input", "react_prompt", "llm", "react_output_parser"]) qp.add_link( "react_output_parser", "run_tool", condition_fn=lambda x: not x["done"], input_fn=lambda x: x["reasoning_step"], ) qp.add_link( "react_output_parser", "process_response", condition_fn=lambda x: x["done"], input_fn=lambda x: 
x["reasoning_step"], ) qp.add_link("process_response", "process_agent_response") qp.add_link("run_tool", "process_agent_response") from pyvis.network import Network net = Network(notebook=True, cdn_resources="in_line", directed=True) net.from_nx(qp.clean_dag) net.show("agent_dag.html") from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner from llama_index.core.callbacks import CallbackManager agent_worker = QueryPipelineAgentWorker(qp) agent = AgentRunner( agent_worker, callback_manager=CallbackManager([]), verbose=True ) task = agent.create_task( "What are some tracks from the artist AC/DC? Limit it to 3" ) step_output = agent.run_step(task.task_id) step_output = agent.run_step(task.task_id) step_output.is_last response = agent.finalize_response(task.task_id) print(str(response)) agent.reset() response = agent.chat( "What are some tracks from the artist AC/DC? Limit it to 3" ) print(str(response)) from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-4-1106-preview") from llama_index.core.agent import Task, AgentChatResponse from typing import Dict, Any from llama_index.core.query_pipeline import ( AgentInputComponent, AgentFnComponent, ) def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict: """Agent input function.""" if "convo_history" not in state: state["convo_history"] = [] state["count"] = 0 state["convo_history"].append(f"User: {task.input}") convo_history_str = "\n".join(state["convo_history"]) or "None" return {"input": task.input, "convo_history": convo_history_str} agent_input_component = AgentInputComponent(fn=agent_input_fn) from llama_index.core import PromptTemplate retry_prompt_str = """\ You are trying to generate a proper natural language query given a user input. This query will then be interpreted by a downstream text-to-SQL agent which will convert the query to a SQL statement. If the agent triggers an error, then that will be reflected in the current conversation history (see below). If the conversation history is None, use the user input. If its not None, generate a new SQL query that avoids the problems of the previous SQL query. Input: {input} Convo history (failed attempts): {convo_history} New input: """ retry_prompt = PromptTemplate(retry_prompt_str) from llama_index.core import Response from typing import Tuple validate_prompt_str = """\ Given the user query, validate whether the inferred SQL query and response from executing the query is correct and answers the query. Answer with YES or NO. Query: {input} Inferred SQL query: {sql_query} SQL Response: {sql_response} Result: """ validate_prompt = PromptTemplate(validate_prompt_str) MAX_ITER = 3 def agent_output_fn( task: Task, state: Dict[str, Any], output: Response ) -> Tuple[AgentChatResponse, bool]: """Agent output component.""" print(f"> Inferred SQL Query: {output.metadata['sql_query']}") print(f"> SQL Response: {str(output)}") state["convo_history"].append( f"Assistant (inferred SQL query): {output.metadata['sql_query']}" ) state["convo_history"].append(f"Assistant (response): {str(output)}") validate_prompt_partial = validate_prompt.as_query_component( partial={ "sql_query": output.metadata["sql_query"], "sql_response": str(output), } ) qp =
QP(chain=[validate_prompt_partial, llm])
llama_index.core.query_pipeline.QueryPipeline
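A hedged sketch of running the two-module validation chain; the input keyword matches the free variable of the partially filled prompt, and the sample query is illustrative:

# Format the validate prompt, send it to the LLM, and inspect the verdict.
validate_output = qp.run(input="What are some tracks from the artist AC/DC?")
is_valid = "YES" in str(validate_output).upper()
print(is_valid)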
get_ipython().run_line_magic('pip', 'install llama-index-llms-huggingface') get_ipython().system('pip install "transformers[torch]" "huggingface_hub[inference]"') get_ipython().system('pip install llama-index') import os from typing import List, Optional from llama_index.llms.huggingface import ( HuggingFaceInferenceAPI, HuggingFaceLLM, ) HF_TOKEN: Optional[str] = os.getenv("HUGGING_FACE_TOKEN") locally_run = HuggingFaceLLM(model_name="HuggingFaceH4/zephyr-7b-alpha") remotely_run = HuggingFaceInferenceAPI( model_name="HuggingFaceH4/zephyr-7b-alpha", token=HF_TOKEN ) remotely_run_anon = HuggingFaceInferenceAPI( model_name="HuggingFaceH4/zephyr-7b-alpha" ) remotely_run_recommended =
HuggingFaceInferenceAPI(token=HF_TOKEN)
llama_index.llms.huggingface.HuggingFaceInferenceAPI
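A quick smoke test of the completed client; with no model_name given, the Inference API falls back to a recommended default:

# Remote text completion via the Hugging Face Inference API.
completion_response = remotely_run_recommended.complete("To infinity, and")
print(completion_response)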
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-colbert') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-gemini') get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-vectara') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google') get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google') get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google') get_ipython().run_line_magic('pip', 'install llama-index') get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"') get_ipython().run_line_magic('pip', 'install torch sentence-transformers') get_ipython().run_line_magic('pip', 'install google-auth-oauthlib') from google.oauth2 import service_account from llama_index.indices.managed.google import GoogleIndex from llama_index.vector_stores.google import set_google_config credentials = service_account.Credentials.from_service_account_file( "service_account_key.json", scopes=[ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/generative-language.retriever", ], ) set_google_config(auth_credentials=credentials) project_name = "TODO-your-project-name" # @param {type:"string"} email = "ht@runllama.ai" # @param {type:"string"} client_file_name = "client_secret.json" get_ipython().system('gcloud config set project $project_name') get_ipython().system('gcloud config set account $email') get_ipython().system('gcloud auth application-default login --no-browser --client-id-file=$client_file_name --scopes="https://www.googleapis.com/auth/generative-language.retriever,https://www.googleapis.com/auth/cloud-platform"') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") import os GOOGLE_API_KEY = "" # add your GOOGLE API key here os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY from llama_index.core import SimpleDirectoryReader from llama_index.indices.managed.google import GoogleIndex google_index =
GoogleIndex.create_corpus(display_name="My first corpus!")
llama_index.indices.managed.google.GoogleIndex.create_corpus
import openai openai.api_key = "sk-your-key" from llama_index.agent import OpenAIAgent from llama_index.tools.graphql.base import GraphQLToolSpec url = "https://spacex-production.up.railway.app/" headers = { "content-type": "application/json", } graphql_spec = GraphQLToolSpec(url=url, headers=headers) agent = OpenAIAgent.from_tools( graphql_spec.to_tool_list(), verbose=True, ) print(agent.chat("get the id, name and type of the Ships from the graphql endpoint")) url = "https://your-store.myshopify.com/admin/api/2023-07/graphql.json" headers = { "accept-language": "en-US,en;q=0.9", "content-type": "application/json", "X-Shopify-Access-Token": "your-admin-key", } graphql_spec =
GraphQLToolSpec(url=url, headers=headers)
llama_index.tools.graphql.base.GraphQLToolSpec
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().system('pip install llama-index') import pinecone import os api_key = os.environ["PINECONE_API_KEY"] pinecone.init(api_key=api_key, environment="us-west1-gcp") pinecone.create_index( "quickstart", dimension=1536, metric="euclidean", pod_type="p1" ) pinecone_index = pinecone.Index("quickstart") pinecone_index.delete(deleteAll=True) from llama_index.vector_stores.pinecone import PineconeVectorStore vector_store = PineconeVectorStore(pinecone_index=pinecone_index) get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PyMuPDFReader loader =
PyMuPDFReader()
llama_index.readers.file.PyMuPDFReader
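The natural next step, matching the identical snippet earlier in this set:

# Parse the PDF into llama-index Document objects.
documents = loader.load(file_path="./data/llama2.pdf")
print(len(documents))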
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore') get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25') get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis') get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "./llama2.pdf"') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/1706.03762.pdf" -O "./attention.pdf"') from llama_index.core import download_loader from llama_index.readers.file import PyMuPDFReader llama2_docs = PyMuPDFReader().load_data( file_path="./llama2.pdf", metadata=True ) attention_docs = PyMuPDFReader().load_data( file_path="./attention.pdf", metadata=True ) import os os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.core.node_parser import TokenTextSplitter nodes = TokenTextSplitter( chunk_size=1024, chunk_overlap=128 ).get_nodes_from_documents(llama2_docs + attention_docs) from llama_index.core.storage.docstore import SimpleDocumentStore from llama_index.storage.docstore.redis import RedisDocumentStore from llama_index.storage.docstore.mongodb import MongoDocumentStore from llama_index.storage.docstore.firestore import FirestoreDocumentStore from llama_index.storage.docstore.dynamodb import DynamoDBDocumentStore docstore = SimpleDocumentStore() docstore.add_documents(nodes) from llama_index.core import VectorStoreIndex, StorageContext from llama_index.retrievers.bm25 import BM25Retriever from llama_index.vector_stores.qdrant import QdrantVectorStore from qdrant_client import QdrantClient client = QdrantClient(path="./qdrant_data") vector_store = QdrantVectorStore("composable", client=client) storage_context = StorageContext.from_defaults(vector_store=vector_store) index =
VectorStoreIndex(nodes=nodes)
llama_index.core.VectorStoreIndex
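A sketch of pairing sparse and dense retrieval over the same nodes, which the BM25 import above points toward:

# BM25 keyword retrieval next to embedding-based retrieval.
bm25_retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)
vector_retriever = index.as_retriever(similarity_top_k=2)
results = bm25_retriever.retrieve("What is multi-head attention?")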
get_ipython().run_line_magic('pip', 'install llama-index-llms-gradient') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().system('pip install llama-index gradientai -q') import os from llama_index.llms.gradient import GradientBaseModelLLM from llama_index.finetuning import GradientFinetuneEngine os.environ["GRADIENT_ACCESS_TOKEN"] = os.getenv("GRADIENT_API_KEY") os.environ["GRADIENT_WORKSPACE_ID"] = "<insert_workspace_id>" from pydantic import BaseModel class Album(BaseModel): """Data model for an album.""" name: str artist: str from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler from llama_index.llms.openai import OpenAI from llama_index.llms.gradient import GradientBaseModelLLM from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser openai_handler =
LlamaDebugHandler()
llama_index.core.callbacks.LlamaDebugHandler
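One way the handler is typically wired in, assuming the classes imported above; the prompt string is illustrative:

# Trace LLM calls made by the structured-output program.
callback_manager = CallbackManager([openai_handler])
openai_llm = OpenAI(model="gpt-3.5-turbo", callback_manager=callback_manager)
openai_program = LLMTextCompletionProgram.from_defaults(
    output_parser=PydanticOutputParser(output_cls=Album),
    prompt_template_str="Generate an example album from the movie {movie_name}",
    llm=openai_llm,
    verbose=True,
)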
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai-legacy') get_ipython().system('pip install llama-index') import json from typing import Sequence from llama_index.core.tools import BaseTool, FunctionTool def multiply(a: int, b: int) -> int: """Multiply two integers and returns the result integer""" return a * b def add(a: int, b: int) -> int: """Add two integers and returns the result integer""" return a + b def useless(a: int, b: int) -> int: """Toy useless function.""" pass multiply_tool = FunctionTool.from_defaults(fn=multiply, name="multiply") useless_tools = [ FunctionTool.from_defaults(fn=useless, name=f"useless_{str(idx)}") for idx in range(28) ] add_tool = FunctionTool.from_defaults(fn=add, name="add") all_tools = [multiply_tool] + [add_tool] + useless_tools all_tools_map = {t.metadata.name: t for t in all_tools} from llama_index.core import VectorStoreIndex from llama_index.core.objects import ObjectIndex, SimpleToolNodeMapping tool_mapping =
SimpleToolNodeMapping.from_objects(all_tools)
llama_index.core.objects.SimpleToolNodeMapping.from_objects
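A plausible continuation, following the tool-retrieval pattern this legacy agent package documents:

# Index the 30 tools so only the relevant ones are surfaced per query.
obj_index = ObjectIndex.from_objects(all_tools, tool_mapping, VectorStoreIndex)
tool_retriever = obj_index.as_retriever(similarity_top_k=2)
relevant_tools = tool_retriever.retrieve("multiply 212 by 122")
print([t.metadata.name for t in relevant_tools])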
get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import nest_asyncio nest_asyncio.apply() get_ipython().system('pip install llama-index') get_ipython().system('pip install spacy') wiki_titles = [ "Toronto", "Seattle", "Chicago", "Boston", "Houston", "Tokyo", "Berlin", "Lisbon", ] from pathlib import Path import requests for title in wiki_titles: response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True, }, ).json() page = next(iter(response["query"]["pages"].values())) wiki_text = page["extract"] data_path = Path("data") if not data_path.exists(): Path.mkdir(data_path) with open(data_path / f"{title}.txt", "w") as fp: fp.write(wiki_text) from llama_index.core import SimpleDirectoryReader city_docs = {} for wiki_title in wiki_titles: city_docs[wiki_title] = SimpleDirectoryReader( input_files=[f"data/{wiki_title}.txt"] ).load_data() from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo", temperature=0.3) city_descs_dict = {} choices = [] choice_to_id_dict = {} for idx, wiki_title in enumerate(wiki_titles): vector_desc = ( "Useful for questions related to specific aspects of" f" {wiki_title} (e.g. the history, arts and culture," " sports, demographics, or more)." ) summary_desc = ( "Useful for any requests that require a holistic summary" f" of EVERYTHING about {wiki_title}. For questions about" " more specific sections, please use the vector_tool." ) doc_id_vector = f"{wiki_title}_vector" doc_id_summary = f"{wiki_title}_summary" city_descs_dict[doc_id_vector] = vector_desc city_descs_dict[doc_id_summary] = summary_desc choices.extend([vector_desc, summary_desc]) choice_to_id_dict[idx * 2] = f"{wiki_title}_vector" choice_to_id_dict[idx * 2 + 1] = f"{wiki_title}_summary" from llama_index.llms.openai import OpenAI from llama_index.core import PromptTemplate llm = OpenAI(model="gpt-3.5-turbo") summary_q_tmpl = """\ You are a summary question generator. Given an existing question which asks for a summary of a given topic, \ generate {num_vary} related queries that also ask for a summary of the topic. For example, assuming we're generating 3 related questions: Base Question: Can you tell me more about Boston? Question Variations: Give me an overview of Boston as a city. Can you describe different aspects of Boston, from the history to the sports scene to the food? Write a concise summary of Boston; I've never been. Now let's give it a shot!
Base Question: {base_question} Question Variations: """ summary_q_prompt = PromptTemplate(summary_q_tmpl) from collections import defaultdict from llama_index.core.evaluation import DatasetGenerator from llama_index.core.evaluation import EmbeddingQAFinetuneDataset from llama_index.core.node_parser import SimpleNodeParser from tqdm.notebook import tqdm def generate_dataset( wiki_titles, city_descs_dict, llm, summary_q_prompt, num_vector_qs_per_node=2, num_summary_qs=4, ): queries = {} corpus = {} relevant_docs = defaultdict(list) for idx, wiki_title in enumerate(tqdm(wiki_titles)): doc_id_vector = f"{wiki_title}_vector" doc_id_summary = f"{wiki_title}_summary" corpus[doc_id_vector] = city_descs_dict[doc_id_vector] corpus[doc_id_summary] = city_descs_dict[doc_id_summary] node_parser = SimpleNodeParser.from_defaults() nodes = node_parser.get_nodes_from_documents(city_docs[wiki_title]) dataset_generator = DatasetGenerator( nodes, llm=llm, num_questions_per_chunk=num_vector_qs_per_node, ) doc_questions = dataset_generator.generate_questions_from_nodes( num=len(nodes) * num_vector_qs_per_node ) for query_idx, doc_question in enumerate(doc_questions): query_id = f"{wiki_title}_{query_idx}" relevant_docs[query_id] = [doc_id_vector] queries[query_id] = doc_question base_q = f"Give me a summary of {wiki_title}" fmt_prompt = summary_q_prompt.format( num_vary=num_summary_qs, base_question=base_q, ) raw_response = llm.complete(fmt_prompt) raw_lines = str(raw_response).split("\n") doc_summary_questions = [l for l in raw_lines if l != ""] print(f"[{idx}] Original Question: {base_q}") print( f"[{idx}] Generated Question Variations: {doc_summary_questions}" ) for query_idx, doc_summary_question in enumerate( doc_summary_questions ): query_id = f"{wiki_title}_summary_{query_idx}" relevant_docs[query_id] = [doc_id_summary] queries[query_id] = doc_summary_question return EmbeddingQAFinetuneDataset( queries=queries, corpus=corpus, relevant_docs=relevant_docs ) dataset = generate_dataset( wiki_titles, city_descs_dict, llm, summary_q_prompt, num_vector_qs_per_node=4, num_summary_qs=5, ) dataset.save_json("dataset.json") dataset = EmbeddingQAFinetuneDataset.from_json("dataset.json") import random def split_train_val_by_query(dataset, split=0.7): """Split dataset by queries.""" query_ids = list(dataset.queries.keys()) query_ids_shuffled = random.sample(query_ids, len(query_ids)) split_idx = int(len(query_ids) * split) train_query_ids = query_ids_shuffled[:split_idx] eval_query_ids = query_ids_shuffled[split_idx:] train_queries = {qid: dataset.queries[qid] for qid in train_query_ids} eval_queries = {qid: dataset.queries[qid] for qid in eval_query_ids} train_rel_docs = { qid: dataset.relevant_docs[qid] for qid in train_query_ids } eval_rel_docs = {qid: dataset.relevant_docs[qid] for qid in eval_query_ids} train_dataset = EmbeddingQAFinetuneDataset( queries=train_queries, corpus=dataset.corpus, relevant_docs=train_rel_docs, ) eval_dataset = EmbeddingQAFinetuneDataset( queries=eval_queries, corpus=dataset.corpus, relevant_docs=eval_rel_docs, ) return train_dataset, eval_dataset train_dataset, eval_dataset = split_train_val_by_query(dataset, split=0.7) from llama_index.finetuning import SentenceTransformersFinetuneEngine finetune_engine = SentenceTransformersFinetuneEngine( train_dataset, model_id="BAAI/bge-small-en", model_output_path="test_model3", val_dataset=eval_dataset, epochs=30, # can set to higher (haven't tested) ) finetune_engine.finetune() ft_embed_model = finetune_engine.get_finetuned_model() ft_embed_model from
llama_index.core.embeddings import resolve_embed_model base_embed_model = resolve_embed_model("local:BAAI/bge-small-en") from llama_index.core.selectors import ( EmbeddingSingleSelector, LLMSingleSelector, ) ft_selector =
EmbeddingSingleSelector.from_defaults(embed_model=ft_embed_model)
llama_index.core.selectors.EmbeddingSingleSelector.from_defaults
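A sketch of exercising the finetuned selector over the generated choices, then mapping the pick back through choice_to_id_dict; the query is illustrative:

# Select the best tool description for a query with the finetuned embeddings.
query_str = "Tell me more about the sports scene in Toronto"
result = ft_selector.select(choices, query_str)
print(choice_to_id_dict[result.ind])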
get_ipython().system('pip install llama-index') get_ipython().system('pip install promptlayer') import os os.environ["OPENAI_API_KEY"] = "sk-..." os.environ["PROMPTLAYER_API_KEY"] = "pl_..." get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import SimpleDirectoryReader docs = SimpleDirectoryReader("./data/paul_graham/").load_data() from llama_index.core import set_global_handler
set_global_handler("promptlayer", pl_tags=["paul graham", "essay"])
llama_index.core.set_global_handler
get_ipython().run_line_magic('pip', 'install llama-index-llms-mistralai') get_ipython().system('pip install llama-index') from llama_index.llms.mistralai import MistralAI llm = MistralAI() resp = llm.complete("Paul Graham is ") print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.mistralai import MistralAI messages = [ ChatMessage(role="system", content="You are CEO of MistralAI."), ChatMessage(role="user", content="Tell me the story about La plateforme"), ] resp = MistralAI().chat(messages) print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.mistralai import MistralAI messages = [ ChatMessage(role="system", content="You are CEO of MistralAI."), ChatMessage(role="user", content="Tell me the story about La plateforme"), ] resp =
MistralAI(random_seed=42)
llama_index.llms.mistralai.MistralAI
from llama_index.llms.openai import OpenAI from llama_index.core import VectorStoreIndex from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core.postprocessor import LLMRerank from llama_index.core import VectorStoreIndex from llama_index.vector_stores.pinecone import PineconeVectorStore from llama_index.core import Settings from llama_index.packs.koda_retriever import KodaRetriever from llama_index.core.evaluation import RetrieverEvaluator from llama_index.core import SimpleDirectoryReader import os from pinecone import Pinecone from llama_index.core.node_parser import SemanticSplitterNodeParser from llama_index.core.ingestion import IngestionPipeline from llama_index.core.retrievers import VectorIndexRetriever from llama_index.core.evaluation import generate_qa_embedding_pairs import pandas as pd pc = Pinecone(api_key=os.environ.get("PINECONE_API_KEY")) index = pc.Index("llama2-paper") # this was previously created in my pinecone account Settings.llm = OpenAI() Settings.embed_model = OpenAIEmbedding() vector_store = PineconeVectorStore(pinecone_index=index) vector_index = VectorStoreIndex.from_vector_store( vector_store=vector_store, embed_model=Settings.embed_model ) reranker = LLMRerank(llm=Settings.llm) # optional koda_retriever = KodaRetriever( index=vector_index, llm=Settings.llm, reranker=reranker, # optional verbose=True, similarity_top_k=10, ) vanilla_retriever = vector_index.as_retriever() pipeline = IngestionPipeline( transformations=[Settings.embed_model], vector_store=vector_store ) def load_documents(file_path, num_pages=None): if num_pages: documents = SimpleDirectoryReader(input_files=[file_path]).load_data()[ :num_pages ] else: documents = SimpleDirectoryReader(input_files=[file_path]).load_data() return documents doc1 = load_documents( "/workspaces/llama_index/llama-index-packs/llama-index-packs-koda-retriever/examples/data/dense_x_retrieval.pdf", num_pages=9, ) doc2 = load_documents( "/workspaces/llama_index/llama-index-packs/llama-index-packs-koda-retriever/examples/data/llama_beyond_english.pdf", num_pages=7, ) doc3 = load_documents( "/workspaces/llama_index/llama-index-packs/llama-index-packs-koda-retriever/examples/data/llm_compiler.pdf", num_pages=12, ) docs = [doc1, doc2, doc3] nodes = list() node_parser = SemanticSplitterNodeParser( embed_model=Settings.embed_model, breakpoint_percentile_threshold=95 ) for doc in docs: _nodes = node_parser.build_semantic_nodes_from_documents( documents=doc, ) nodes.extend(_nodes) pipeline.run(nodes=_nodes) qa_dataset =
generate_qa_embedding_pairs(nodes=nodes, llm=Settings.llm)
llama_index.core.evaluation.generate_qa_embedding_pairs
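A sketch of scoring both retrievers on the generated pairs; aevaluate_dataset can be awaited directly in a notebook:

# Hit rate / MRR for the Koda retriever versus the vanilla retriever.
koda_evaluator = RetrieverEvaluator.from_metric_names(
    ["mrr", "hit_rate"], retriever=koda_retriever
)
vanilla_evaluator = RetrieverEvaluator.from_metric_names(
    ["mrr", "hit_rate"], retriever=vanilla_retriever
)
koda_results = await koda_evaluator.aevaluate_dataset(qa_dataset)
vanilla_results = await vanilla_evaluator.aevaluate_dataset(qa_dataset)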
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import camelot from llama_index.core import VectorStoreIndex from llama_index.core.query_engine import PandasQueryEngine from llama_index.core.schema import IndexNode from llama_index.llms.openai import OpenAI from llama_index.readers.file import PyMuPDFReader from typing import List import os os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY" from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") file_path = "billionaires_page.pdf" reader = PyMuPDFReader() docs = reader.load(file_path) def get_tables(path: str, pages: List[int]): table_dfs = [] for page in pages: table_list = camelot.read_pdf(path, pages=str(page)) table_df = table_list[0].df table_df = ( table_df.rename(columns=table_df.iloc[0]) .drop(table_df.index[0]) .reset_index(drop=True) ) table_dfs.append(table_df) return table_dfs table_dfs = get_tables(file_path, pages=[3, 25]) table_dfs[0] table_dfs[1] llm = OpenAI(model="gpt-4") df_query_engines = [ PandasQueryEngine(table_df, llm=llm) for table_df in table_dfs ] response = df_query_engines[0].query( "What's the net worth of the second richest billionaire in 2023?" ) print(str(response)) response = df_query_engines[1].query( "How many billionaires were there in 2009?" ) print(str(response)) from llama_index.core import Settings doc_nodes = Settings.node_parser.get_nodes_from_documents(docs) summaries = [ ( "This node provides information about the world's richest billionaires" " in 2023" ), ( "This node provides information on the number of billionaires and" " their combined net worth from 2000 to 2023." ), ] df_nodes = [ IndexNode(text=summary, index_id=f"pandas{idx}") for idx, summary in enumerate(summaries) ] df_id_query_engine_mapping = { f"pandas{idx}": df_query_engine for idx, df_query_engine in enumerate(df_query_engines) } vector_index = VectorStoreIndex(doc_nodes + df_nodes) vector_retriever = vector_index.as_retriever(similarity_top_k=1) vector_index0 = VectorStoreIndex(doc_nodes) vector_query_engine0 = vector_index0.as_query_engine() from llama_index.core.retrievers import RecursiveRetriever from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.core import get_response_synthesizer recursive_retriever = RecursiveRetriever( "vector", retriever_dict={"vector": vector_retriever}, query_engine_dict=df_id_query_engine_mapping, verbose=True, ) response_synthesizer =
get_response_synthesizer(response_mode="compact")
llama_index.core.get_response_synthesizer
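The assembly step that typically follows the recursive-retriever setup above:

# Route table questions into the pandas engines, everything else to vector search.
query_engine = RetrieverQueryEngine.from_args(
    recursive_retriever, response_synthesizer=response_synthesizer
)
response = query_engine.query(
    "What's the net worth of the second richest billionaire in 2023?"
)
print(str(response))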
get_ipython().system('pip install -U llama-index-multi-modal-llms-dashscope') get_ipython().run_line_magic('env', 'DASHSCOPE_API_KEY=YOUR_DASHSCOPE_API_KEY') from llama_index.multi_modal_llms.dashscope import ( DashScopeMultiModal, DashScopeMultiModalModels, ) from llama_index.core.multi_modal_llms.generic_utils import load_image_urls image_urls = [ "https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg", ] image_documents =
load_image_urls(image_urls)
llama_index.core.multi_modal_llms.generic_utils.load_image_urls
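A short sketch of invoking the multi-modal model on those image documents; the QWEN_VL_MAX member is assumed from the imported enum:

# Ask the Qwen-VL model about the downloaded image.
mm_llm = DashScopeMultiModal(model_name=DashScopeMultiModalModels.QWEN_VL_MAX)
complete_response = mm_llm.complete(
    prompt="What's in the image?", image_documents=image_documents
)
print(complete_response)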
import openai openai.api_key = "sk-your-key" from llama_index.agent import OpenAIAgent import requests import yaml f = requests.get( "https://raw.githubusercontent.com/sisbell/chatgpt-plugin-store/main/manifests/today-currency-converter.oiconma.repl.co.json" ).text manifest = yaml.safe_load(f) from llama_index.tools.chatgpt_plugin.base import ChatGPTPluginToolSpec from llama_index.tools.requests.base import RequestsToolSpec requests_spec = RequestsToolSpec() plugin_spec =
ChatGPTPluginToolSpec(manifest)
llama_index.tools.chatgpt_plugin.base.ChatGPTPluginToolSpec
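A plausible continuation that hands the agent both the plugin-described endpoints and the raw request tools:

agent = OpenAIAgent.from_tools(
    [*plugin_spec.to_tool_list(), *requests_spec.to_tool_list()],
    verbose=True,
)
print(agent.chat("Convert 100 euros to CAD"))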
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') from llama_index.core import VectorStoreIndex, SimpleDirectoryReader get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() index = VectorStoreIndex.from_documents(documents) retriever = index.as_retriever() from llama_index.core.query_engine import CustomQueryEngine from llama_index.core.retrievers import BaseRetriever from llama_index.core import get_response_synthesizer from llama_index.core.response_synthesizers import BaseSynthesizer class RAGQueryEngine(CustomQueryEngine): """RAG Query Engine.""" retriever: BaseRetriever response_synthesizer: BaseSynthesizer def custom_query(self, query_str: str): nodes = self.retriever.retrieve(query_str) response_obj = self.response_synthesizer.synthesize(query_str, nodes) return response_obj from llama_index.llms.openai import OpenAI from llama_index.core import PromptTemplate qa_prompt = PromptTemplate( "Context information is below.\n" "---------------------\n" "{context_str}\n" "---------------------\n" "Given the context information and not prior knowledge, " "answer the query.\n" "Query: {query_str}\n" "Answer: " ) class RAGStringQueryEngine(CustomQueryEngine): """RAG String Query Engine.""" retriever: BaseRetriever response_synthesizer: BaseSynthesizer llm: OpenAI qa_prompt: PromptTemplate def custom_query(self, query_str: str): nodes = self.retriever.retrieve(query_str) context_str = "\n\n".join([n.node.get_content() for n in nodes]) response = self.llm.complete( self.qa_prompt.format(context_str=context_str, query_str=query_str) ) return str(response) synthesizer =
get_response_synthesizer(response_mode="compact")
llama_index.core.get_response_synthesizer
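The assembly the class definitions above point toward; the sample query mirrors the other Paul Graham snippets:

# Wire retriever, synthesizer, LLM, and prompt into the string-output engine.
llm = OpenAI(model="gpt-3.5-turbo")
query_engine = RAGStringQueryEngine(
    retriever=retriever,
    response_synthesizer=synthesizer,
    llm=llm,
    qa_prompt=qa_prompt,
)
response = query_engine.query("What did the author do growing up?")
print(str(response))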
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().handlers = [] logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import ( SimpleDirectoryReader, StorageContext, VectorStoreIndex, ) from llama_index.retrievers.bm25 import BM25Retriever from llama_index.core.retrievers import VectorIndexRetriever from llama_index.core.node_parser import SentenceSplitter from llama_index.llms.openai import OpenAI get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham").load_data() llm = OpenAI(model="gpt-4") splitter = SentenceSplitter(chunk_size=1024) nodes = splitter.get_nodes_from_documents(documents) storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) index = VectorStoreIndex( nodes=nodes, storage_context=storage_context, ) retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2) from llama_index.core.response.notebook_utils import display_source_node nodes = retriever.retrieve("What happened at Viaweb and Interleaf?") for node in nodes: display_source_node(node) nodes = retriever.retrieve("What did Paul Graham do after RISD?") for node in nodes: display_source_node(node) from llama_index.core.tools import RetrieverTool vector_retriever = VectorIndexRetriever(index) bm25_retriever =
BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2)
llama_index.retrievers.bm25.BM25Retriever.from_defaults
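A sketch of routing between the two retrievers, which the RetrieverTool import above suggests; RouterRetriever is assumed available in llama_index.core.retrievers:

from llama_index.core.retrievers import RouterRetriever

retriever_tools = [
    RetrieverTool.from_defaults(
        retriever=vector_retriever,
        description="Useful in most cases",
    ),
    RetrieverTool.from_defaults(
        retriever=bm25_retriever,
        description="Useful if searching about specific information",
    ),
]
router_retriever = RouterRetriever.from_defaults(
    retriever_tools=retriever_tools, llm=llm, select_multi=True
)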
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-graph-stores-nebula') get_ipython().run_line_magic('pip', 'install llama-index-llms-azure-openai') import os os.environ["OPENAI_API_KEY"] = "INSERT OPENAI KEY" import logging import sys from llama_index.llms.openai import OpenAI from llama_index.core import Settings logging.basicConfig(stream=sys.stdout, level=logging.INFO) llm = OpenAI(temperature=0, model="gpt-3.5-turbo") Settings.llm = llm Settings.chunk_size = 512 import os import json import openai from llama_index.llms.azure_openai import AzureOpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, KnowledgeGraphIndex, ) from llama_index.core import StorageContext from llama_index.graph_stores.nebula import NebulaGraphStore import logging import sys from IPython.display import Markdown, display logging.basicConfig( stream=sys.stdout, level=logging.INFO ) # logging.DEBUG for more verbose output logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) openai.api_type = "azure" openai.api_base = "https://<foo-bar>.openai.azure.com" openai.api_version = "2022-12-01" os.environ["OPENAI_API_KEY"] = "<your-openai-key>" openai.api_key = os.getenv("OPENAI_API_KEY") llm = AzureOpenAI( model="<foo-bar-model>", engine="<foo-bar-deployment>", temperature=0, api_key=openai.api_key, api_type=openai.api_type, api_base=openai.api_base, api_version=openai.api_version, ) embedding_model = OpenAIEmbedding( model="text-embedding-ada-002", deployment_name="<foo-bar-deployment>", api_key=openai.api_key, api_base=openai.api_base, api_type=openai.api_type, api_version=openai.api_version, ) Settings.llm = llm Settings.chunk_size = 512 Settings.embed_model = embedding_model from llama_index.core import KnowledgeGraphIndex, SimpleDirectoryReader from llama_index.core import StorageContext from llama_index.graph_stores.nebula import NebulaGraphStore from llama_index.llms.openai import OpenAI from IPython.display import Markdown, display documents = SimpleDirectoryReader( "../../../../examples/paul_graham_essay/data" ).load_data() get_ipython().run_line_magic('pip', 'install nebula3-python') os.environ["NEBULA_USER"] = "root" os.environ[ "NEBULA_PASSWORD" ] = "<password>" # replace with your password, by default it is "nebula" os.environ[ "NEBULA_ADDRESS" ] = "127.0.0.1:9669" # assumed we have NebulaGraph 3.5.0 or newer installed locally space_name = "paul_graham_essay" edge_types, rel_prop_names = ["relationship"], [ "relationship" ] # default, could be omit if create from an empty kg tags = ["entity"] # default, could be omit if create from an empty kg graph_store = NebulaGraphStore( space_name=space_name, edge_types=edge_types, rel_prop_names=rel_prop_names, tags=tags, ) storage_context = StorageContext.from_defaults(graph_store=graph_store) index = KnowledgeGraphIndex.from_documents( documents, storage_context=storage_context, max_triplets_per_chunk=2, space_name=space_name, edge_types=edge_types, rel_prop_names=rel_prop_names, tags=tags, ) query_engine = index.as_query_engine() response = query_engine.query("Tell me more about Interleaf") display(Markdown(f"<b>{response}</b>")) response = query_engine.query( "Tell me more about what the author worked on at Interleaf" ) display(Markdown(f"<b>{response}</b>")) get_ipython().run_line_magic('pip', 'install
ipython-ngql networkx pyvis') get_ipython().run_line_magic('load_ext', 'ngql') get_ipython().run_line_magic('ngql', '--address 127.0.0.1 --port 9669 --user root --password <password>') get_ipython().run_cell_magic('ngql', '', "USE paul_graham_essay;\nMATCH p=(n)-[*1..2]-()\n WHERE id(n) IN ['Interleaf', 'history', 'Software', 'Company'] \nRETURN p LIMIT 100;\n") get_ipython().run_line_magic('ng_draw', '') index = KnowledgeGraphIndex.from_documents( documents, storage_context=storage_context, max_triplets_per_chunk=2, space_name=space_name, edge_types=edge_types, rel_prop_names=rel_prop_names, tags=tags, include_embeddings=True, ) query_engine = index.as_query_engine( include_text=True, response_mode="tree_summarize", embedding_mode="hybrid", similarity_top_k=5, ) response = query_engine.query( "Tell me more about what the author worked on at Interleaf" ) display(Markdown(f"<b>{response}</b>")) query_engine = index.as_query_engine( include_text=True, response_mode="tree_summarize", embedding_mode="hybrid", similarity_top_k=5, explore_global_knowledge=True, ) response = query_engine.query("Tell me more about what the author and Lisp") from pyvis.network import Network g = index.get_networkx_graph() net = Network(notebook=True, cdn_resources="in_line", directed=True) net.from_nx(g) net.show("example.html") from llama_index.core.node_parser import SentenceSplitter node_parser = SentenceSplitter() nodes = node_parser.get_nodes_from_documents(documents) index =
KnowledgeGraphIndex.from_documents([], storage_context=storage_context)
llama_index.core.KnowledgeGraphIndex.from_documents
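A sketch of manually populating the empty KG index from the parsed nodes; the triplet content is illustrative:

# Attach a (subject, predicate, object) triplet to a source node.
node_0 = nodes[0]
index.upsert_triplet_and_node(("author", "worked_on", "Lisp"), node_0)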
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm') get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git') get_ipython().run_line_magic('pip', 'install torch torchvision') get_ipython().run_line_magic('pip', 'install matplotlib scikit-image') get_ipython().run_line_magic('pip', 'install -U qdrant_client') import os OPENAI_API_TOKEN = "" os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN from pathlib import Path input_image_path = Path("input_images") if not input_image_path.exists(): Path.mkdir(input_image_path) get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1nUhsBRiSWxcVQv8t8Cvvro8HJZ88LCzj" -O ./input_images/long_range_spec.png') get_ipython().system('wget "https://docs.google.com/uc?export=download&id=19pLwx0nVqsop7lo0ubUSYTzQfMtKJJtJ" -O ./input_images/model_y.png') get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1utu3iD9XEgR5Sb7PrbtMf1qw8T1WdNmF" -O ./input_images/performance_spec.png') get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1dpUakWMqaXR4Jjn1kHuZfB0pAXvjn2-i" -O ./input_images/price.png') get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1qNeT201QAesnAP5va1ty0Ky5Q_jKkguV" -O ./input_images/real_wheel_spec.png') from PIL import Image import matplotlib.pyplot as plt import os image_paths = [] for img_path in os.listdir("./input_images"): image_paths.append(str(os.path.join("./input_images", img_path))) def plot_images(image_paths): images_shown = 0 plt.figure(figsize=(16, 9)) for img_path in image_paths: if os.path.isfile(img_path): image = Image.open(img_path) plt.subplot(2, 3, images_shown + 1) plt.imshow(image) plt.xticks([]) plt.yticks([]) images_shown += 1 if images_shown >= 9: break plot_images(image_paths) from llama_index.multi_modal_llms.openai import OpenAIMultiModal from llama_index.core import SimpleDirectoryReader image_documents = SimpleDirectoryReader("./input_images").load_data() openai_mm_llm = OpenAIMultiModal( model="gpt-4-vision-preview", api_key=OPENAI_API_TOKEN, max_new_tokens=1500 ) response_1 = openai_mm_llm.complete( prompt="Describe the images as an alternative text", image_documents=image_documents, ) print(response_1) response_2 = openai_mm_llm.complete( prompt="Can you tell me what is the price with each spec?", image_documents=image_documents, ) print(response_2) import requests def get_wikipedia_images(title): response = requests.get( "https://en.wikipedia.org/w/api.php", params={ "action": "query", "format": "json", "titles": title, "prop": "imageinfo", "iiprop": "url|dimensions|mime", "generator": "images", "gimlimit": "50", }, ).json() image_urls = [] for page in response["query"]["pages"].values(): if page["imageinfo"][0]["url"].endswith(".jpg") or page["imageinfo"][ 0 ]["url"].endswith(".png"): image_urls.append(page["imageinfo"][0]["url"]) return image_urls from pathlib import Path import requests import urllib.request image_uuid = 0 image_metadata_dict = {} MAX_IMAGES_PER_WIKI = 20 wiki_titles = { "Tesla Model Y", "Tesla Model X", "Tesla Model 3", "Tesla Model S", "Kia EV6", "BMW i3", "Audi e-tron", "Ford Mustang", "Porsche Taycan", "Rivian", "Polestar", } data_path = Path("mixed_wiki") if not data_path.exists(): Path.mkdir(data_path) for title in wiki_titles: response = requests.get( "https://en.wikipedia.org/w/api.php", 
params={ "action": "query", "format": "json", "titles": title, "prop": "extracts", "explaintext": True, }, ).json() page = next(iter(response["query"]["pages"].values())) wiki_text = page["extract"] with open(data_path / f"{title}.txt", "w") as fp: fp.write(wiki_text) images_per_wiki = 0 try: list_img_urls = get_wikipedia_images(title) for url in list_img_urls: if ( url.endswith(".jpg") or url.endswith(".png") or url.endswith(".svg") ): image_uuid += 1 urllib.request.urlretrieve( url, data_path / f"{image_uuid}.jpg" ) images_per_wiki += 1 if images_per_wiki > MAX_IMAGES_PER_WIKI: break except: print(str(Exception("No images found for Wikipedia page: ")) + title) continue get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O ./mixed_wiki/tesla_2021_10k.htm') from llama_index.core.indices import MultiModalVectorStoreIndex from llama_index.vector_stores.qdrant import QdrantVectorStore from llama_index.core import SimpleDirectoryReader, StorageContext import qdrant_client from llama_index.core import SimpleDirectoryReader client = qdrant_client.QdrantClient(path="qdrant_mm_db") text_store = QdrantVectorStore( client=client, collection_name="text_collection" ) image_store = QdrantVectorStore( client=client, collection_name="image_collection" ) storage_context = StorageContext.from_defaults( vector_store=text_store, image_store=image_store ) documents = SimpleDirectoryReader("./mixed_wiki/").load_data() index = MultiModalVectorStoreIndex.from_documents( documents, storage_context=storage_context, ) from llama_index.core import load_index_from_storage print(response_2.text) MAX_TOKENS = 50 retriever_engine = index.as_retriever( similarity_top_k=3, image_similarity_top_k=3 ) retrieval_results = retriever_engine.retrieve(response_2.text[:MAX_TOKENS]) from llama_index.core.response.notebook_utils import display_source_node from llama_index.core.schema import ImageNode retrieved_image = [] for res_node in retrieval_results: if isinstance(res_node.node, ImageNode): retrieved_image.append(res_node.node.metadata["file_path"]) else: display_source_node(res_node, source_length=200) plot_images(retrieved_image) response_3 = openai_mm_llm.complete( prompt="what are other similar cars?", image_documents=image_documents, ) print(response_3) from llama_index.core import PromptTemplate from llama_index.core.query_engine import SimpleMultiModalQueryEngine qa_tmpl_str = ( "Context information is below.\n" "---------------------\n" "{context_str}\n" "---------------------\n" "Given the context information and not prior knowledge, " "answer the query.\n" "Query: {query_str}\n" "Answer: " ) qa_tmpl =
PromptTemplate(qa_tmpl_str)
llama_index.core.PromptTemplate
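For reference, a minimal sketch of exercising the completed template directly; PromptTemplate.format fills the placeholders, and the context/query strings here are illustrative only:

# hypothetical values; qa_tmpl comes from the record above
prompt = qa_tmpl.format(
    context_str="Model Y Long Range: $65,990",
    query_str="How much does the Long Range cost?",
)
print(prompt)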
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-vectara') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core.schema import TextNode from llama_index.core.indices.managed.types import ManagedIndexQueryMode from llama_index.indices.managed.vectara import VectaraIndex from llama_index.indices.managed.vectara import VectaraAutoRetriever from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo from llama_index.llms.openai import OpenAI nodes = [ TextNode( text=( "A pragmatic paleontologist touring an almost complete theme park on an island " + "in Central America is tasked with protecting a couple of kids after a power " + "failure causes the park's cloned dinosaurs to run loose." ), metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"}, ),
TextNode( text=( "A thief who steals corporate secrets through the use of dream-sharing technology " + "is given the inverse task of planting an idea into the mind of a C.E.O., " + "but his tragic past may doom the project and his team to disaster." )
llama_index.core.schema.TextNode
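The second node above is cut off mid-record; a hedged sketch of how it would plausibly be finished, mirroring the shape of the first node (the metadata values are illustrative, not from the source):

inception_node = TextNode(
    text=(
        "A thief who steals corporate secrets through the use of"
        " dream-sharing technology is given the inverse task of"
        " planting an idea into the mind of a C.E.O."
    ),
    # illustrative metadata, matching the schema of the first node
    metadata={"year": 2010, "rating": 8.2, "genre": "science fiction"},
)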
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'pg_essay.txt'") from llama_index.core import SimpleDirectoryReader reader = SimpleDirectoryReader(input_files=["pg_essay.txt"]) documents = reader.load_data() from llama_index.core.query_pipeline import QueryPipeline, InputComponent from typing import Dict, Any, List, Optional from llama_index.llms.openai import OpenAI from llama_index.core import Document, VectorStoreIndex from llama_index.core import SummaryIndex from llama_index.core.response_synthesizers import TreeSummarize from llama_index.core.schema import NodeWithScore, TextNode from llama_index.core import PromptTemplate from llama_index.core.selectors import LLMSingleSelector hyde_str = """\ Please write a passage to answer the question: {query_str} Try to include as many key details as possible. Passage: """ hyde_prompt = PromptTemplate(hyde_str) llm = OpenAI(model="gpt-3.5-turbo") summarizer = TreeSummarize(llm=llm) vector_index = VectorStoreIndex.from_documents(documents) vector_query_engine = vector_index.as_query_engine(similarity_top_k=2) summary_index = SummaryIndex.from_documents(documents) summary_qrewrite_str = """\ Here's a question: {query_str} You are responsible for feeding the question to an agent that, given context, will try to answer the question. The context may or may not be relevant. Rewrite the question to highlight the fact that only some pieces of context (or none) may be relevant. """ summary_qrewrite_prompt = PromptTemplate(summary_qrewrite_str) summary_query_engine = summary_index.as_query_engine() selector =
LLMSingleSelector.from_defaults()
llama_index.core.selectors.LLMSingleSelector.from_defaults
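A small sketch of driving the selector by hand before wiring it into a router; the choice strings are made up, and the result fields (selections/index/reason) are as I recall them, so verify against your version:

choices = [
    "vector search over the essay (good for specific questions)",
    "summarization over the entire essay",
]
result = selector.select(choices, query="What is a summary of this essay?")
print(result.selections[0].index, result.selections[0].reason)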
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') from llama_index.core.callbacks import ( CallbackManager, LlamaDebugHandler, CBEventType, ) get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import SimpleDirectoryReader docs = SimpleDirectoryReader("./data/paul_graham/").load_data() from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo", temperature=0) llama_debug =
LlamaDebugHandler(print_trace_on_end=True)
llama_index.core.callbacks.LlamaDebugHandler
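A sketch of wiring the debug handler into the global callback manager so that the index build and queries get traced:

from llama_index.core import Settings, VectorStoreIndex
from llama_index.core.callbacks import CallbackManager

Settings.callback_manager = CallbackManager([llama_debug])
index = VectorStoreIndex.from_documents(docs)
# inspect the (input, output) pairs of the LLM calls recorded so far
event_pairs = llama_debug.get_llm_inputs_outputs()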
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader from llama_index.core.postprocessor import TimeWeightedPostprocessor from llama_index.core.node_parser import SentenceSplitter from llama_index.core.storage.docstore import SimpleDocumentStore from llama_index.core.response.notebook_utils import display_response from datetime import datetime, timedelta from llama_index.core import StorageContext now = datetime.now() key = "__last_accessed__" doc1 = SimpleDirectoryReader( input_files=["./test_versioned_data/paul_graham_essay_v1.txt"] ).load_data()[0] doc2 = SimpleDirectoryReader( input_files=["./test_versioned_data/paul_graham_essay_v2.txt"] ).load_data()[0] doc3 = SimpleDirectoryReader( input_files=["./test_versioned_data/paul_graham_essay_v3.txt"] ).load_data()[0] from llama_index.core import Settings Settings.text_splitter = SentenceSplitter(chunk_size=512) nodes1 = Settings.text_splitter.get_nodes_from_documents([doc1]) nodes2 =
Settings.text_splitter.get_nodes_from_documents([doc2])
llama_index.core.Settings.text_splitter.get_nodes_from_documents
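To complete the picture, a hedged sketch of stamping each versioned node set with the __last_accessed__ key defined above and building the time-weighted postprocessor (constructor arguments as commonly shown in the demo; verify against your version):

from llama_index.core.postprocessor import TimeWeightedPostprocessor

# stamp v1 older than v2 so recency can break ties between near-identical chunks
for hours_ago, node_set in [(2, nodes1), (1, nodes2)]:
    for node in node_set:
        node.metadata[key] = (now - timedelta(hours=hours_ago)).timestamp()

postprocessor = TimeWeightedPostprocessor(
    time_decay=0.5, time_access_refresh=False, top_k=1
)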
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-web') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import os import openai from llama_index.core import set_global_handler set_global_handler("wandb", run_args={"project": "llamaindex"}) os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] from llama_index.llms.openai import OpenAI from llama_index.core.schema import MetadataMode llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo", max_tokens=512) from llama_index.core.node_parser import TokenTextSplitter from llama_index.core.extractors import ( SummaryExtractor, QuestionsAnsweredExtractor, ) node_parser = TokenTextSplitter( separator=" ", chunk_size=256, chunk_overlap=128 ) extractors_1 = [ QuestionsAnsweredExtractor( questions=3, llm=llm, metadata_mode=MetadataMode.EMBED ), ] extractors_2 = [ SummaryExtractor(summaries=["prev", "self", "next"], llm=llm), QuestionsAnsweredExtractor( questions=3, llm=llm, metadata_mode=MetadataMode.EMBED ), ] from llama_index.core import SimpleDirectoryReader from llama_index.readers.web import SimpleWebPageReader reader = SimpleWebPageReader(html_to_text=True) docs = reader.load_data(urls=["https://eugeneyan.com/writing/llm-patterns/"]) print(docs[0].get_content()) orig_nodes = node_parser.get_nodes_from_documents(docs) nodes = orig_nodes[20:28] print(nodes[3].get_content(metadata_mode="all")) from llama_index.core.ingestion import IngestionPipeline pipeline =
IngestionPipeline(transformations=[node_parser, *extractors_1])
llama_index.core.ingestion.IngestionPipeline
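A sketch of the step that naturally follows: run the pipeline over the node slice prepared above (run accepts either documents or nodes; in_place=False keeps the originals untouched, assuming your version exposes that flag):

nodes_1 = pipeline.run(nodes=nodes, in_place=False, show_progress=True)
print(nodes_1[3].metadata)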
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.core.agent import ( CustomSimpleAgentWorker, Task, AgentChatResponse, ) from typing import Dict, Any, List, Tuple, Optional from llama_index.core.tools import BaseTool, QueryEngineTool from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser from llama_index.core.query_engine import RouterQueryEngine from llama_index.core import ChatPromptTemplate, PromptTemplate from llama_index.core.selectors import PydanticSingleSelector from llama_index.core.bridge.pydantic import Field, BaseModel from llama_index.core.llms import ChatMessage, MessageRole DEFAULT_PROMPT_STR = """ Given previous question/response pairs, please determine if an error has occurred in the response, and suggest \ a modified question that will not trigger the error. Examples of modified questions: - The question itself is modified to elicit a non-erroneous response - The question is augmented with context that will help the downstream system better answer the question. - The question is augmented with examples of negative responses, or other negative questions. An error means that either an exception has triggered, or the response is completely irrelevant to the question. Please return the evaluation of the response in the following JSON format. """ def get_chat_prompt_template( system_prompt: str, current_reasoning: Tuple[str, str] ) -> ChatPromptTemplate: system_msg = ChatMessage(role=MessageRole.SYSTEM, content=system_prompt) messages = [system_msg] for raw_msg in current_reasoning: if raw_msg[0] == "user": messages.append( ChatMessage(role=MessageRole.USER, content=raw_msg[1]) ) else: messages.append( ChatMessage(role=MessageRole.ASSISTANT, content=raw_msg[1]) ) return ChatPromptTemplate(message_templates=messages) class ResponseEval(BaseModel): """Evaluation of whether the response has an error.""" has_error: bool = Field( ..., description="Whether the response has an error." ) new_question: str = Field(..., description="The suggested new question.") explanation: str = Field( ..., description=( "The explanation for the error as well as for the new question." "Can include the direct stack trace as well." ), ) from llama_index.core.bridge.pydantic import PrivateAttr class RetryAgentWorker(CustomSimpleAgentWorker): """Agent worker that adds a retry layer on top of a router. Continues iterating until there's no errors / task is done. """ prompt_str: str = Field(default=DEFAULT_PROMPT_STR) max_iterations: int = Field(default=10) _router_query_engine: RouterQueryEngine = PrivateAttr() def __init__(self, tools: List[BaseTool], **kwargs: Any) -> None: """Init params.""" for tool in tools: if not isinstance(tool, QueryEngineTool): raise ValueError( f"Tool {tool.metadata.name} is not a query engine tool." ) self._router_query_engine = RouterQueryEngine( selector=PydanticSingleSelector.from_defaults(), query_engine_tools=tools, verbose=kwargs.get("verbose", False), ) super().__init__( tools=tools, **kwargs, ) def _initialize_state(self, task: Task, **kwargs: Any) -> Dict[str, Any]: """Initialize state.""" return {"count": 0, "current_reasoning": []} def _run_step( self, state: Dict[str, Any], task: Task, input: Optional[str] = None ) -> Tuple[AgentChatResponse, bool]: """Run step. 
Returns: Tuple of (agent_response, is_done) """ if "new_input" not in state: new_input = task.input else: new_input = state["new_input"] response = self._router_query_engine.query(new_input) state["current_reasoning"].extend( [("user", new_input), ("assistant", str(response))] ) chat_prompt_tmpl = get_chat_prompt_template( self.prompt_str, state["current_reasoning"] ) llm_program = LLMTextCompletionProgram.from_defaults( output_parser=PydanticOutputParser(output_cls=ResponseEval), prompt=chat_prompt_tmpl, llm=self.llm, ) response_eval = llm_program( query_str=new_input, response_str=str(response) ) if not response_eval.has_error: is_done = True else: is_done = False state["new_input"] = response_eval.new_question if self.verbose: print(f"> Question: {new_input}") print(f"> Response: {response}") print(f"> Response eval: {response_eval.dict()}") return AgentChatResponse(response=str(response)), is_done def _finalize_task(self, state: Dict[str, Any], **kwargs) -> None: """Finalize task.""" pass from llama_index.core.tools import QueryEngineTool from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) from llama_index.core import SQLDatabase engine = create_engine("sqlite:///:memory:", future=True) metadata_obj = MetaData() table_name = "city_stats" city_stats_table = Table( table_name, metadata_obj, Column("city_name", String(16), primary_key=True), Column("population", Integer), Column("country", String(16), nullable=False), ) metadata_obj.create_all(engine) from sqlalchemy import insert rows = [ {"city_name": "Toronto", "population": 2930000, "country": "Canada"}, {"city_name": "Tokyo", "population": 13960000, "country": "Japan"}, {"city_name": "Berlin", "population": 3645000, "country": "Germany"}, ] for row in rows: stmt = insert(city_stats_table).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) from llama_index.core.query_engine import NLSQLTableQueryEngine sql_database =
SQLDatabase(engine, include_tables=["city_stats"])
llama_index.core.SQLDatabase
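A minimal sketch completing the flow: point the natural-language SQL engine at the populated table and ask a question (the query text is illustrative):

query_engine = NLSQLTableQueryEngine(
    sql_database=sql_database, tables=["city_stats"]
)
response = query_engine.query("Which city has the highest population?")
print(str(response))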
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb') get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-mongodb') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys import os logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex from llama_index.core import SummaryIndex from llama_index.core import ComposableGraph from llama_index.llms.openai import OpenAI from llama_index.core.response.notebook_utils import display_response from llama_index.core import Settings get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") reader = SimpleDirectoryReader("./data/paul_graham/") documents = reader.load_data() from llama_index.core.node_parser import SentenceSplitter nodes = SentenceSplitter().get_nodes_from_documents(documents) MONGO_URI = os.environ["MONGO_URI"] from llama_index.storage.docstore.mongodb import MongoDocumentStore from llama_index.storage.index_store.mongodb import MongoIndexStore storage_context = StorageContext.from_defaults( docstore=MongoDocumentStore.from_uri(uri=MONGO_URI), index_store=MongoIndexStore.from_uri(uri=MONGO_URI), ) storage_context.docstore.add_documents(nodes) summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index = VectorStoreIndex(nodes, storage_context=storage_context) keyword_table_index = SimpleKeywordTableIndex( nodes, storage_context=storage_context ) len(storage_context.docstore.docs) storage_context.persist() list_id = summary_index.index_id vector_id = vector_index.index_id keyword_id = keyword_table_index.index_id from llama_index.core import load_index_from_storage storage_context = StorageContext.from_defaults( docstore=MongoDocumentStore.from_uri(uri=MONGO_URI), index_store=
MongoIndexStore.from_uri(uri=MONGO_URI)
llama_index.storage.index_store.mongodb.MongoIndexStore.from_uri
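The reload step that typically follows, using the index ids saved above:

summary_index = load_index_from_storage(
    storage_context=storage_context, index_id=list_id
)
vector_index = load_index_from_storage(
    storage_context=storage_context, index_id=vector_id
)
keyword_table_index = load_index_from_storage(
    storage_context=storage_context, index_id=keyword_id
)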
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().handlers = [] logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import ( SimpleDirectoryReader, StorageContext, VectorStoreIndex, ) from llama_index.retrievers.bm25 import BM25Retriever from llama_index.core.retrievers import VectorIndexRetriever from llama_index.core.node_parser import SentenceSplitter from llama_index.llms.openai import OpenAI get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham").load_data() llm = OpenAI(model="gpt-4") splitter = SentenceSplitter(chunk_size=1024) nodes = splitter.get_nodes_from_documents(documents) storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) index = VectorStoreIndex( nodes=nodes, storage_context=storage_context, ) retriever = BM25Retriever.from_defaults(nodes=nodes, similarity_top_k=2) from llama_index.core.response.notebook_utils import display_source_node nodes = retriever.retrieve("What happened at Viaweb and Interleaf?") for node in nodes: display_source_node(node) nodes = retriever.retrieve("What did Paul Graham do after RISD?") for node in nodes: display_source_node(node) from llama_index.core.tools import RetrieverTool vector_retriever =
VectorIndexRetriever(index)
llama_index.core.retrievers.VectorIndexRetriever
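A sketch of where this record is headed: wrap the BM25 and vector retrievers as tools so a router can pick between them (the descriptions are illustrative):

bm25_tool = RetrieverTool.from_defaults(
    retriever=retriever,
    description="Keyword (BM25) lookup over the Paul Graham essay.",
)
vector_tool = RetrieverTool.from_defaults(
    retriever=vector_retriever,
    description="Embedding-based lookup over the Paul Graham essay.",
)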
get_ipython().system('pip install llama-index') import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, ) from IPython.display import Markdown, display get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() index = VectorStoreIndex.from_documents(documents) query_engine = index.as_query_engine(response_mode="tree_summarize") def display_prompt_dict(prompts_dict): for k, p in prompts_dict.items(): text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>" display(Markdown(text_md)) print(p.get_template()) display(Markdown("<br><br>")) prompts_dict = query_engine.get_prompts() display_prompt_dict(prompts_dict) prompts_dict = query_engine.response_synthesizer.get_prompts() display_prompt_dict(prompts_dict) query_engine = index.as_query_engine(response_mode="compact") prompts_dict = query_engine.get_prompts() display_prompt_dict(prompts_dict) response = query_engine.query("What did the author do growing up?") print(str(response)) from llama_index.core import PromptTemplate query_engine = index.as_query_engine(response_mode="tree_summarize") new_summary_tmpl_str = ( "Context information is below.\n" "---------------------\n" "{context_str}\n" "---------------------\n" "Given the context information and not prior knowledge, " "answer the query in the style of a Shakespeare play.\n" "Query: {query_str}\n" "Answer: " ) new_summary_tmpl = PromptTemplate(new_summary_tmpl_str) query_engine.update_prompts( {"response_synthesizer:summary_template": new_summary_tmpl} ) prompts_dict = query_engine.get_prompts() display_prompt_dict(prompts_dict) response = query_engine.query("What did the author do growing up?") print(str(response)) from llama_index.core.query_engine import ( RouterQueryEngine, FLAREInstructQueryEngine, ) from llama_index.core.selectors import LLMMultiSelector from llama_index.core.evaluation import FaithfulnessEvaluator, DatasetGenerator from llama_index.core.postprocessor import LLMRerank from llama_index.core.tools import QueryEngineTool query_tool = QueryEngineTool.from_defaults( query_engine=query_engine, description="test description" ) router_query_engine = RouterQueryEngine.from_defaults([query_tool]) prompts_dict = router_query_engine.get_prompts() display_prompt_dict(prompts_dict) flare_query_engine = FLAREInstructQueryEngine(query_engine) prompts_dict = flare_query_engine.get_prompts() display_prompt_dict(prompts_dict) from llama_index.core.selectors import LLMSingleSelector selector =
LLMSingleSelector.from_defaults()
llama_index.core.selectors.LLMSingleSelector.from_defaults
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis') get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-redis') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys import os logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex from llama_index.core import SummaryIndex from llama_index.core import ComposableGraph from llama_index.llms.openai import OpenAI from llama_index.core.response.notebook_utils import display_response from llama_index.core import Settings get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") reader = SimpleDirectoryReader("./data/paul_graham/") documents = reader.load_data() from llama_index.core.node_parser import SentenceSplitter nodes = SentenceSplitter().get_nodes_from_documents(documents) REDIS_HOST = os.getenv("REDIS_HOST", "127.0.0.1") REDIS_PORT = os.getenv("REDIS_PORT", 6379) from llama_index.storage.docstore.redis import RedisDocumentStore from llama_index.storage.index_store.redis import RedisIndexStore storage_context = StorageContext.from_defaults( docstore=RedisDocumentStore.from_host_and_port( host=REDIS_HOST, port=REDIS_PORT, namespace="llama_index" ), index_store=RedisIndexStore.from_host_and_port( host=REDIS_HOST, port=REDIS_PORT, namespace="llama_index" ), ) storage_context.docstore.add_documents(nodes) len(storage_context.docstore.docs) summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index = VectorStoreIndex(nodes, storage_context=storage_context) keyword_table_index = SimpleKeywordTableIndex( nodes, storage_context=storage_context ) len(storage_context.docstore.docs) storage_context.persist(persist_dir="./storage") list_id = summary_index.index_id vector_id = vector_index.index_id keyword_id = keyword_table_index.index_id from llama_index.core import load_index_from_storage storage_context = StorageContext.from_defaults( docstore=RedisDocumentStore.from_host_and_port( host=REDIS_HOST, port=REDIS_PORT, namespace="llama_index" ), index_store=RedisIndexStore.from_host_and_port( host=REDIS_HOST, port=REDIS_PORT, namespace="llama_index" ), ) summary_index = load_index_from_storage( storage_context=storage_context, index_id=list_id ) vector_index = load_index_from_storage( storage_context=storage_context, index_id=vector_id ) keyword_table_index = load_index_from_storage( storage_context=storage_context, index_id=keyword_id ) chatgpt = OpenAI(temperature=0, model="gpt-3.5-turbo") Settings.llm = chatgpt Settings.chunk_size = 1024 query_engine = summary_index.as_query_engine() list_response = query_engine.query("What is a summary of this document?")
display_response(list_response)
llama_index.core.response.notebook_utils.display_response
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() get_ipython().system("wget 'https://raw.githubusercontent.com/jerryjliu/llama_index/main/examples/gatsby/gatsby_full.txt' -O 'gatsby_full.txt'") from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader( input_files=["./gatsby_full.txt"] ).load_data() from llama_index.llms.openai import OpenAI from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") Settings.chunk_size = 1024 nodes =
Settings.node_parser.get_nodes_from_documents(documents)
llama_index.core.Settings.node_parser.get_nodes_from_documents
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore') get_ipython().run_line_magic('pip', 'install llama-index-storage-kvstore-firestore') get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-firestore') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex from llama_index.core import SummaryIndex from llama_index.core import ComposableGraph from llama_index.llms.openai import OpenAI from llama_index.core.response.notebook_utils import display_response from llama_index.core import Settings get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") reader = SimpleDirectoryReader("./data/paul_graham/") documents = reader.load_data() from llama_index.core.node_parser import SentenceSplitter nodes = SentenceSplitter().get_nodes_from_documents(documents) from llama_index.storage.kvstore.firestore import FirestoreKVStore from llama_index.storage.docstore.firestore import FirestoreDocumentStore from llama_index.storage.index_store.firestore import FirestoreIndexStore kvstore =
FirestoreKVStore()
llama_index.storage.kvstore.firestore.FirestoreKVStore
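A hedged sketch of the step that plausibly follows: share one Firestore kvstore between the docstore and index store, mirroring the Redis/Mongo recipes elsewhere in this set:

docstore = FirestoreDocumentStore(kvstore)
index_store = FirestoreIndexStore(kvstore)
storage_context = StorageContext.from_defaults(
    docstore=docstore, index_store=index_store
)
storage_context.docstore.add_documents(nodes)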
get_ipython().run_line_magic('pip', 'install llama-index-readers-database') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from __future__ import absolute_import import os os.environ["OPENAI_API_KEY"] = "" from llama_index.readers.database import DatabaseReader from llama_index.core import VectorStoreIndex db = DatabaseReader( scheme="postgresql", # Database Scheme host="localhost", # Database Host port="5432", # Database Port user="postgres", # Database User password="FakeExamplePassword", # Database Password dbname="postgres", # Database Name ) print(type(db)) print(type(db.load_data)) print(type(db.sql_database)) print(type(db.sql_database.from_uri)) print(type(db.sql_database.get_single_table_info)) print(type(db.sql_database.get_table_columns)) print(type(db.sql_database.get_usable_table_names)) print(type(db.sql_database.insert_into_table)) print(type(db.sql_database.run_sql)) print(type(db.sql_database.dialect)) print(type(db.sql_database.engine)) print(type(db.sql_database)) db_from_sql_database = DatabaseReader(sql_database=db.sql_database) print(type(db_from_sql_database)) print(type(db.sql_database.engine)) db_from_engine = DatabaseReader(engine=db.sql_database.engine) print(type(db_from_engine)) print(type(db.uri)) db_from_uri =
DatabaseReader(uri=db.uri)
llama_index.readers.database.DatabaseReader
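A sketch of actually pulling rows: load_data takes a SQL query and returns one Document per row (the query and table here are illustrative):

query = "SELECT CONCAT(name, ' is ', age, ' years old.') AS text FROM public.users"
documents = db.load_data(query=query)
index = VectorStoreIndex.from_documents(documents)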
get_ipython().run_line_magic('pip', 'install llama-index-readers-elasticsearch') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-opensearch') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-ollama') get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from os import getenv from llama_index.core import SimpleDirectoryReader from llama_index.vector_stores.opensearch import ( OpensearchVectorStore, OpensearchVectorClient, ) from llama_index.core import VectorStoreIndex, StorageContext endpoint = getenv("OPENSEARCH_ENDPOINT", "http://localhost:9200") idx = getenv("OPENSEARCH_INDEX", "gpt-index-demo") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() text_field = "content" embedding_field = "embedding" client = OpensearchVectorClient( endpoint, idx, 1536, embedding_field=embedding_field, text_field=text_field ) vector_store = OpensearchVectorStore(client) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( documents=documents, storage_context=storage_context ) query_engine = index.as_query_engine() res = query_engine.query("What did the author do growing up?") res.response from llama_index.core import Document from llama_index.core.vector_stores import MetadataFilters, ExactMatchFilter import regex as re text_chunks = documents[0].text.split("\n\n") footnotes = [ Document( text=chunk, id_=documents[0].doc_id, metadata={"is_footnote": bool(re.search(r"^\s*\[\d+\]\s*", chunk))}, ) for chunk in text_chunks if bool(re.search(r"^\s*\[\d+\]\s*", chunk)) ] for f in footnotes: index.insert(f) footnote_query_engine = index.as_query_engine( filters=MetadataFilters( filters=[ ExactMatchFilter( key="term", value='{"metadata.is_footnote": "true"}' ), ExactMatchFilter( key="query_string", value='{"query": "content: space AND content: lisp"}', ), ] ) ) res = footnote_query_engine.query( "What did the author say about space aliens and Lisp?" ) res.response from llama_index.readers.elasticsearch import ElasticsearchReader rdr = ElasticsearchReader(endpoint, idx) docs = rdr.load_data(text_field, embedding_field=embedding_field) print("embedding dimension:", len(docs[0].embedding)) print("all fields in index:", docs[0].metadata.keys()) print("total number of chunks created:", len(docs)) docs = rdr.load_data(text_field, {"query": {"match": {text_field: "Lisp"}}}) print("chunks that mention Lisp:", len(docs)) docs = rdr.load_data(text_field, {"query": {"match": {text_field: "Yahoo"}}}) print("chunks that mention Yahoo:", len(docs)) from os import getenv from llama_index.vector_stores.opensearch import ( OpensearchVectorStore, OpensearchVectorClient, ) endpoint = getenv("OPENSEARCH_ENDPOINT", "http://localhost:9200") idx = getenv("OPENSEARCH_INDEX", "auto_retriever_movies") text_field = "content" embedding_field = "embedding" client = OpensearchVectorClient( endpoint, idx, 4096, embedding_field=embedding_field, text_field=text_field, search_pipeline="hybrid-search-pipeline", ) from llama_index.embeddings.ollama import OllamaEmbedding embed_model =
OllamaEmbedding(model_name="llama2")
llama_index.embeddings.ollama.OllamaEmbedding
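A quick sanity check of the local embedding model before indexing (assumes an Ollama server running with the llama2 model pulled; the length should match the OpensearchVectorClient dimension above):

embedding = embed_model.get_text_embedding("a fast red electric crossover")
print(len(embedding))  # expected: 4096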
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis') get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-redis') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys import os logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex from llama_index.core import SummaryIndex from llama_index.core import ComposableGraph from llama_index.llms.openai import OpenAI from llama_index.core.response.notebook_utils import display_response from llama_index.core import Settings get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") reader = SimpleDirectoryReader("./data/paul_graham/") documents = reader.load_data() from llama_index.core.node_parser import SentenceSplitter nodes = SentenceSplitter().get_nodes_from_documents(documents) REDIS_HOST = os.getenv("REDIS_HOST", "127.0.0.1") REDIS_PORT = os.getenv("REDIS_PORT", 6379) from llama_index.storage.docstore.redis import RedisDocumentStore from llama_index.storage.index_store.redis import RedisIndexStore storage_context = StorageContext.from_defaults( docstore=RedisDocumentStore.from_host_and_port( host=REDIS_HOST, port=REDIS_PORT, namespace="llama_index" ), index_store=RedisIndexStore.from_host_and_port( host=REDIS_HOST, port=REDIS_PORT, namespace="llama_index" ), ) storage_context.docstore.add_documents(nodes) len(storage_context.docstore.docs) summary_index =
SummaryIndex(nodes, storage_context=storage_context)
llama_index.core.SummaryIndex
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import os os.environ["OPENAI_API_KEY"] = "sk-..." from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo-1106", temperature=0.2) Settings.embed_model =
OpenAIEmbedding(model="text-embedding-3-small")
llama_index.embeddings.openai.OpenAIEmbedding
import os from getpass import getpass if os.getenv("OPENAI_API_KEY") is None: os.environ["OPENAI_API_KEY"] = getpass( "Paste your OpenAI key from:" " https://platform.openai.com/account/api-keys\n" ) assert os.getenv("OPENAI_API_KEY", "").startswith( "sk-" ), "This doesn't look like a valid OpenAI API key" print("OpenAI API key configured") get_ipython().run_line_magic('pip', 'install -q html2text llama-index pandas pyarrow tqdm') get_ipython().run_line_magic('pip', 'install -q llama-index-readers-web') get_ipython().run_line_magic('pip', 'install -q llama-index-callbacks-openinference') import hashlib import json from pathlib import Path import os import textwrap from typing import List, Union import llama_index.core from llama_index.readers.web import SimpleWebPageReader from llama_index.core import VectorStoreIndex from llama_index.core.node_parser import SentenceSplitter from llama_index.core.callbacks import CallbackManager from llama_index.callbacks.openinference import OpenInferenceCallbackHandler from llama_index.callbacks.openinference.base import ( as_dataframe, QueryData, NodeData, ) from llama_index.core.node_parser import SimpleNodeParser import pandas as pd from tqdm import tqdm documents = SimpleWebPageReader().load_data( [ "https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt" ] ) print(documents[0].text) parser = SentenceSplitter() nodes = parser.get_nodes_from_documents(documents) print(nodes[0].text) callback_handler = OpenInferenceCallbackHandler() callback_manager = CallbackManager([callback_handler]) llama_index.core.Settings.callback_manager = callback_manager index = VectorStoreIndex.from_documents(documents) query_engine = index.as_query_engine() max_characters_per_line = 80 queries = [ "What did Paul Graham do growing up?", "When and how did Paul Graham's mother die?", "What, in Paul Graham's opinion, is the most distinctive thing about YC?", "When and how did Paul Graham meet Jessica Livingston?", "What is Bel, and when and where was it written?", ] for query in queries: response = query_engine.query(query) print("Query") print("=====") print(textwrap.fill(query, max_characters_per_line)) print() print("Response") print("========") print(textwrap.fill(str(response), max_characters_per_line)) print() query_data_buffer = callback_handler.flush_query_data_buffer() query_dataframe =
as_dataframe(query_data_buffer)
llama_index.callbacks.openinference.base.as_dataframe
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis') get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-redis') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys import os logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex from llama_index.core import SummaryIndex from llama_index.core import ComposableGraph from llama_index.llms.openai import OpenAI from llama_index.core.response.notebook_utils import display_response from llama_index.core import Settings get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") reader = SimpleDirectoryReader("./data/paul_graham/") documents = reader.load_data() from llama_index.core.node_parser import SentenceSplitter nodes = SentenceSplitter().get_nodes_from_documents(documents) REDIS_HOST = os.getenv("REDIS_HOST", "127.0.0.1") REDIS_PORT = os.getenv("REDIS_PORT", 6379) from llama_index.storage.docstore.redis import RedisDocumentStore from llama_index.storage.index_store.redis import RedisIndexStore storage_context = StorageContext.from_defaults( docstore=RedisDocumentStore.from_host_and_port( host=REDIS_HOST, port=REDIS_PORT, namespace="llama_index" ), index_store=RedisIndexStore.from_host_and_port( host=REDIS_HOST, port=REDIS_PORT, namespace="llama_index" ), ) storage_context.docstore.add_documents(nodes) len(storage_context.docstore.docs) summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index =
VectorStoreIndex(nodes, storage_context=storage_context)
llama_index.core.VectorStoreIndex
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().handlers = [] logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, StorageContext, ) from llama_index.core import SummaryIndex from llama_index.core.node_parser import SentenceSplitter from llama_index.core.response.notebook_utils import display_response from llama_index.llms.openai import OpenAI get_ipython().system("mkdir -p 'data'") get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.core import Document from llama_index.readers.file import PyMuPDFReader loader = PyMuPDFReader() docs0 = loader.load(file_path=Path("./data/llama2.pdf")) doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] llm = OpenAI(model="gpt-4") chunk_sizes = [128, 256, 512, 1024] nodes_list = [] vector_indices = [] for chunk_size in chunk_sizes: print(f"Chunk Size: {chunk_size}") splitter = SentenceSplitter(chunk_size=chunk_size) nodes = splitter.get_nodes_from_documents(docs) for node in nodes: node.metadata["chunk_size"] = chunk_size node.excluded_embed_metadata_keys = ["chunk_size"] node.excluded_llm_metadata_keys = ["chunk_size"] nodes_list.append(nodes) vector_index = VectorStoreIndex(nodes) vector_indices.append(vector_index) from llama_index.core.tools import RetrieverTool from llama_index.core.schema import IndexNode retriever_dict = {} retriever_nodes = [] for chunk_size, vector_index in zip(chunk_sizes, vector_indices): node_id = f"chunk_{chunk_size}" node = IndexNode( text=( "Retrieves relevant context from the Llama 2 paper (chunk size" f" {chunk_size})" ), index_id=node_id, ) retriever_nodes.append(node) retriever_dict[node_id] = vector_index.as_retriever() from llama_index.core.selectors import PydanticMultiSelector from llama_index.core.retrievers import RouterRetriever from llama_index.core.retrievers import RecursiveRetriever from llama_index.core import SummaryIndex summary_index = SummaryIndex(retriever_nodes) retriever = RecursiveRetriever( root_id="root", retriever_dict={"root": summary_index.as_retriever(), **retriever_dict}, ) nodes = await retriever.aretrieve( "Tell me about the main aspects of safety fine-tuning" ) print(f"Number of nodes: {len(nodes)}") for node in nodes: print(node.node.metadata["chunk_size"]) print(node.node.get_text()) from llama_index.core.postprocessor import LLMRerank, SentenceTransformerRerank from llama_index.postprocessor.cohere_rerank import CohereRerank reranker = CohereRerank(top_n=10) from llama_index.core.query_engine import RetrieverQueryEngine query_engine = RetrieverQueryEngine(retriever, node_postprocessors=[reranker]) response = query_engine.query( "Tell me about the main aspects of safety fine-tuning" ) display_response( response, show_source=True, source_length=500, show_source_metadata=True ) from collections import defaultdict import pandas as pd def mrr_all(metadata_values, metadata_key, source_nodes): value_to_mrr_dict = {} for metadata_value in metadata_values: mrr = 0 for idx, source_node in
enumerate(source_nodes): if source_node.node.metadata[metadata_key] == metadata_value: mrr = 1 / (idx + 1) break else: continue value_to_mrr_dict[metadata_value] = mrr df = pd.DataFrame(value_to_mrr_dict, index=["MRR"]) return df.style.set_caption("Mean Reciprocal Rank") print("Mean Reciprocal Rank for each Chunk Size") mrr_all(chunk_sizes, "chunk_size", response.source_nodes) from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset from llama_index.llms.openai import OpenAI import nest_asyncio nest_asyncio.apply() eval_llm = OpenAI(model="gpt-4") dataset_generator = DatasetGenerator( nodes_list[-1], llm=eval_llm, show_progress=True, num_questions_per_chunk=2, ) eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=60) eval_dataset.save_json("data/llama2_eval_qr_dataset.json") eval_dataset = QueryResponseDataset.from_json( "data/llama2_eval_qr_dataset.json" ) import asyncio import nest_asyncio nest_asyncio.apply() from llama_index.core.evaluation import ( CorrectnessEvaluator, SemanticSimilarityEvaluator, RelevancyEvaluator, FaithfulnessEvaluator, PairwiseComparisonEvaluator, ) evaluator_c = CorrectnessEvaluator(llm=eval_llm) evaluator_s = SemanticSimilarityEvaluator() evaluator_r =
RelevancyEvaluator(llm=eval_llm)
llama_index.core.evaluation.RelevancyEvaluator
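A hedged sketch of finishing the evaluator set and scoring a single example from the dataset; qr_pairs and the result fields are as I recall them, so verify against your version:

evaluator_f = FaithfulnessEvaluator(llm=eval_llm)

# score one (query, reference) pair from the generated dataset
sample_query, sample_reference = eval_dataset.qr_pairs[0]
response = query_engine.query(sample_query)
eval_result = evaluator_c.evaluate(
    query=sample_query, response=str(response), reference=sample_reference
)
print(eval_result.score, eval_result.feedback)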
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-mongodb') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-firestore') get_ipython().run_line_magic('pip', 'install llama-index-retrievers-bm25') get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-redis') get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "./llama2.pdf"') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/1706.03762.pdf" -O "./attention.pdf"') from llama_index.core import download_loader from llama_index.readers.file import PyMuPDFReader llama2_docs =
PyMuPDFReader()
llama_index.readers.file.PyMuPDFReader
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-web') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import os import openai from llama_index.core import set_global_handler set_global_handler("wandb", run_args={"project": "llamaindex"}) os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] from llama_index.llms.openai import OpenAI from llama_index.core.schema import MetadataMode llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo", max_tokens=512) from llama_index.core.node_parser import TokenTextSplitter from llama_index.core.extractors import ( SummaryExtractor, QuestionsAnsweredExtractor, ) node_parser = TokenTextSplitter( separator=" ", chunk_size=256, chunk_overlap=128 ) extractors_1 = [ QuestionsAnsweredExtractor( questions=3, llm=llm, metadata_mode=MetadataMode.EMBED ), ] extractors_2 = [
SummaryExtractor(summaries=["prev", "self", "next"], llm=llm)
llama_index.core.extractors.SummaryExtractor
get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank') get_ipython().system('pip install llama-index') from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, pprint_response, ) get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() index = VectorStoreIndex.from_documents(documents=documents) import os from llama_index.postprocessor.cohere_rerank import CohereRerank api_key = os.environ["COHERE_API_KEY"] cohere_rerank = CohereRerank(api_key=api_key, top_n=2) query_engine = index.as_query_engine( similarity_top_k=10, node_postprocessors=[cohere_rerank], ) response = query_engine.query( "What did Sam Altman do in this essay?", )
pprint_response(response)
llama_index.core.pprint_response
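pprint_response can also surface the reranked source nodes, which is the point of this demo:

pprint_response(response, show_source=True)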
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.agent.openai import OpenAIAgent from llama_index.llms.openai import OpenAI from llama_index.core.tools import BaseTool, FunctionTool def multiply(a: int, b: int) -> int: """Multiply two integers and return the result integer""" return a * b multiply_tool = FunctionTool.from_defaults(fn=multiply) def add(a: int, b: int) -> int: """Add two integers and return the result integer""" return a + b add_tool = FunctionTool.from_defaults(fn=add) llm = OpenAI(model="gpt-3.5-turbo-1106") agent = OpenAIAgent.from_tools( [multiply_tool, add_tool], llm=llm, verbose=True ) response = agent.chat("What is (121 * 3) + 42?") print(str(response)) response = agent.stream_chat("What is (121 * 3) + 42?") import nest_asyncio nest_asyncio.apply() response = await agent.achat("What is (121 * 3) + 42?") print(str(response)) response = await agent.astream_chat("What is (121 * 3) + 42?") response_gen = response.response_gen async for token in response.async_response_gen(): print(token, end="") import json def get_current_weather(location, unit="fahrenheit"): """Get the current weather in a given location""" if "tokyo" in location.lower(): return json.dumps( {"location": location, "temperature": "10", "unit": "celsius"} ) elif "san francisco" in location.lower(): return json.dumps( {"location": location, "temperature": "72", "unit": "fahrenheit"} ) else: return json.dumps( {"location": location, "temperature": "22", "unit": "celsius"} ) weather_tool = FunctionTool.from_defaults(fn=get_current_weather) llm = OpenAI(model="gpt-3.5-turbo-1106") agent =
OpenAIAgent.from_tools([weather_tool], llm=llm, verbose=True)
llama_index.agent.openai.OpenAIAgent.from_tools
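A short sketch exercising the weather tool (the city strings match the stub above):

response = agent.chat("What's the weather like in San Francisco and Tokyo?")
print(str(response))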
get_ipython().system('pip install llama_index') get_ipython().system('pip install llama_hub') get_ipython().system('pip install torch_geometric') import os from pprint import pprint from llama_index import ( ServiceContext, VectorStoreIndex, SummaryIndex, ) import llama_hub.docstring_walker as docstring_walker walker = docstring_walker.DocstringWalker() path_to_docstring_walker = os.path.dirname(docstring_walker.__file__) example1_docs = walker.load_data(path_to_docstring_walker) print(example1_docs[0].text) example1_index = VectorStoreIndex(example1_docs) example1_query_engine = example1_index.as_query_engine() pprint( example1_query_engine.query("What is the main purpose of DocstringWalker?").response ) print( example1_query_engine.query( "What are the main functions used in DocstringWalker. Use numbered list, briefly describe each function." ).response ) import torch_geometric.nn.kge as kge path_to_module = os.path.dirname(kge.__file__) example2_docs = walker.load_data(path_to_module) example2_index =
SummaryIndex(example2_docs)
llama_index.SummaryIndex
get_ipython().run_line_magic('pip', 'install llama-index-llms-fireworks') get_ipython().run_line_magic('pip', 'install llama-index') from llama_index.llms.fireworks import Fireworks resp = Fireworks().complete("Paul Graham is ") print(resp) from llama_index.core.llms import ChatMessage from llama_index.llms.fireworks import Fireworks messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="What is your name"), ] resp =
Fireworks()
llama_index.llms.fireworks.Fireworks
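For completeness, hedged sketches of the chat and streaming paths; Fireworks follows the standard LLM interface, so chat and stream_complete should apply:

resp = Fireworks().chat(messages)
print(resp)

llm = Fireworks()
for r in llm.stream_complete("Paul Graham is "):
    print(r.delta, end="")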
get_ipython().run_line_magic('pip', 'install llama-index-storage-docstore-dynamodb') get_ipython().run_line_magic('pip', 'install llama-index-storage-index-store-dynamodb') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-dynamodb') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys import os logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import SimpleDirectoryReader, StorageContext from llama_index.core import VectorStoreIndex, SimpleKeywordTableIndex from llama_index.core import SummaryIndex from llama_index.llms.openai import OpenAI from llama_index.core.response.notebook_utils import display_response from llama_index.core import Settings get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") reader = SimpleDirectoryReader("./data/paul_graham/") documents = reader.load_data() from llama_index.core.node_parser import SentenceSplitter nodes = SentenceSplitter().get_nodes_from_documents(documents) TABLE_NAME = os.environ["DYNAMODB_TABLE_NAME"] from llama_index.storage.docstore.dynamodb import DynamoDBDocumentStore from llama_index.storage.index_store.dynamodb import DynamoDBIndexStore from llama_index.vector_stores.dynamodb import DynamoDBVectorStore storage_context = StorageContext.from_defaults( docstore=DynamoDBDocumentStore.from_table_name(table_name=TABLE_NAME), index_store=DynamoDBIndexStore.from_table_name(table_name=TABLE_NAME), vector_store=DynamoDBVectorStore.from_table_name(table_name=TABLE_NAME), ) storage_context.docstore.add_documents(nodes) summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index = VectorStoreIndex(nodes, storage_context=storage_context) keyword_table_index = SimpleKeywordTableIndex( nodes, storage_context=storage_context ) len(storage_context.docstore.docs) storage_context.persist() list_id = summary_index.index_id vector_id = vector_index.index_id keyword_id = keyword_table_index.index_id from llama_index.core import load_index_from_storage storage_context = StorageContext.from_defaults( docstore=DynamoDBDocumentStore.from_table_name(table_name=TABLE_NAME), index_store=
DynamoDBIndexStore.from_table_name(table_name=TABLE_NAME)
llama_index.storage.index_store.dynamodb.DynamoDBIndexStore.from_table_name
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().handlers = [] logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import ( VectorStoreIndex, SimpleDirectoryReader, StorageContext, SimpleKeywordTableIndex, ) from llama_index.core import SummaryIndex from llama_index.core.node_parser import SentenceSplitter from llama_index.llms.openai import OpenAI get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() llm = OpenAI(model="gpt-4") splitter = SentenceSplitter(chunk_size=1024) nodes = splitter.get_nodes_from_documents(documents) storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index = VectorStoreIndex(nodes, storage_context=storage_context) keyword_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context) list_retriever = summary_index.as_retriever() vector_retriever = vector_index.as_retriever() keyword_retriever = keyword_index.as_retriever() from llama_index.core.tools import RetrieverTool list_tool = RetrieverTool.from_defaults( retriever=list_retriever, description=( "Will retrieve all context from Paul Graham's essay on What I Worked" " On. Don't use if the question only requires more specific context." ), ) vector_tool = RetrieverTool.from_defaults( retriever=vector_retriever, description=( "Useful for retrieving specific context from Paul Graham essay on What" " I Worked On." ), ) keyword_tool = RetrieverTool.from_defaults( retriever=keyword_retriever, description=( "Useful for retrieving specific context from Paul Graham essay on What" " I Worked On (using entities mentioned in query)" ), ) from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector from llama_index.core.selectors import ( PydanticMultiSelector, PydanticSingleSelector, ) from llama_index.core.retrievers import RouterRetriever from llama_index.core.response.notebook_utils import display_source_node retriever = RouterRetriever( selector=PydanticSingleSelector.from_defaults(llm=llm), retriever_tools=[ list_tool, vector_tool, ], ) nodes = retriever.retrieve( "Can you give me all the context regarding the author's life?" ) for node in nodes: display_source_node(node) nodes = retriever.retrieve("What did Paul Graham do after RISD?") for node in nodes:
display_source_node(node)
llama_index.core.response.notebook_utils.display_source_node
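For completeness, a sketch of the multi-selector variant of the same router, reusing the three retriever tools and `llm` defined above; the query string is illustrative:

retriever = RouterRetriever(
    selector=PydanticMultiSelector.from_defaults(llm=llm),
    retriever_tools=[list_tool, vector_tool, keyword_tool],
)
nodes = retriever.retrieve(
    "What is the summary of the document? What did Paul Graham do after RISD?"
)
for node in nodes:
    display_source_node(node)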
get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-docarray') get_ipython().system('pip install llama-index') import os import sys import logging import textwrap import warnings warnings.filterwarnings("ignore") os.environ["TOKENIZERS_PARALLELISM"] = "false" from llama_index.core import ( GPTVectorStoreIndex, SimpleDirectoryReader, Document, ) from llama_index.vector_stores.docarray import DocArrayHnswVectorStore from IPython.display import Markdown, display import os os.environ["OPENAI_API_KEY"] = "<your openai key>" get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() print( "Document ID:", documents[0].doc_id, "Document Hash:", documents[0].doc_hash, ) from llama_index.core import StorageContext vector_store = DocArrayHnswVectorStore(work_dir="hnsw_index") storage_context = StorageContext.from_defaults(vector_store=vector_store) index = GPTVectorStoreIndex.from_documents( documents, storage_context=storage_context ) query_engine = index.as_query_engine() response = query_engine.query("What did the author do growing up?") print(textwrap.fill(str(response), 100)) response = query_engine.query("What was a hard moment for the author?") print(textwrap.fill(str(response), 100)) from llama_index.core.schema import TextNode nodes = [ TextNode( text="The Shawshank Redemption", metadata={ "author": "Stephen King", "theme": "Friendship", }, ), TextNode( text="The Godfather", metadata={ "director": "Francis Ford Coppola", "theme": "Mafia", }, ), TextNode( text="Inception", metadata={ "director": "Christopher Nolan", }, ), ] from llama_index.core import StorageContext vector_store =
DocArrayHnswVectorStore(work_dir="hnsw_filters")
llama_index.vector_stores.docarray.DocArrayHnswVectorStore
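A minimal sketch of how the movie nodes above are typically indexed and queried with a metadata filter, assuming the `hnsw_filters` store from this row's completion:

from llama_index.core.vector_stores import ExactMatchFilter, MetadataFilters

storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = GPTVectorStoreIndex(nodes, storage_context=storage_context)
filters = MetadataFilters(
    filters=[ExactMatchFilter(key="theme", value="Mafia")]
)
retriever = index.as_retriever(filters=filters)
retriever.retrieve("What is inception about?")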
get_ipython().run_line_magic('pip', 'install llama-index-llms-litellm') get_ipython().system('pip install llama-index') import os from llama_index.llms.litellm import LiteLLM from llama_index.core.llms import ChatMessage os.environ["OPENAI_API_KEY"] = "your-api-key" os.environ["COHERE_API_KEY"] = "your-api-key" message = ChatMessage(role="user", content="Hey! how's it going?") llm = LiteLLM("gpt-3.5-turbo") chat_response = llm.chat([message]) llm = LiteLLM("command-nightly") chat_response = llm.chat([message]) from llama_index.core.llms import ChatMessage from llama_index.llms.litellm import LiteLLM messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="Tell me a story"), ] resp = LiteLLM("gpt-3.5-turbo").chat(messages) print(resp) from llama_index.llms.litellm import LiteLLM llm = LiteLLM("gpt-3.5-turbo") resp = llm.stream_complete("Paul Graham is ") for r in resp: print(r.delta, end="") from llama_index.llms.litellm import LiteLLM messages = [ ChatMessage( role="system", content="You are a pirate with a colorful personality" ), ChatMessage(role="user", content="Tell me a story"), ] llm = LiteLLM("gpt-3.5-turbo") resp = llm.stream_chat(messages) for r in resp: print(r.delta, end="") from llama_index.llms.litellm import LiteLLM llm =
LiteLLM("gpt-3.5-turbo")
llama_index.llms.litellm.LiteLLM
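The completed `llm` can then be exercised with a plain, non-streaming completion, for example:

resp = llm.complete("Paul Graham is ")
print(resp)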
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from llama_index.core.agent import ( CustomSimpleAgentWorker, Task, AgentChatResponse, ) from typing import Dict, Any, List, Tuple, Optional from llama_index.core.tools import BaseTool, QueryEngineTool from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser from llama_index.core.query_engine import RouterQueryEngine from llama_index.core import ChatPromptTemplate, PromptTemplate from llama_index.core.selectors import PydanticSingleSelector from llama_index.core.bridge.pydantic import Field, BaseModel from llama_index.core.llms import ChatMessage, MessageRole DEFAULT_PROMPT_STR = """ Given previous question/response pairs, please determine if an error has occurred in the response, and suggest \ a modified question that will not trigger the error. Examples of modified questions: - The question itself is modified to elicit a non-erroneous response - The question is augmented with context that will help the downstream system better answer the question. - The question is augmented with examples of negative responses, or other negative questions. An error means that either an exception has triggered, or the response is completely irrelevant to the question. Please return the evaluation of the response in the following JSON format. """ def get_chat_prompt_template( system_prompt: str, current_reasoning: Tuple[str, str] ) -> ChatPromptTemplate: system_msg = ChatMessage(role=MessageRole.SYSTEM, content=system_prompt) messages = [system_msg] for raw_msg in current_reasoning: if raw_msg[0] == "user": messages.append( ChatMessage(role=MessageRole.USER, content=raw_msg[1]) ) else: messages.append( ChatMessage(role=MessageRole.ASSISTANT, content=raw_msg[1]) ) return ChatPromptTemplate(message_templates=messages) class ResponseEval(BaseModel): """Evaluation of whether the response has an error.""" has_error: bool = Field( ..., description="Whether the response has an error." ) new_question: str = Field(..., description="The suggested new question.") explanation: str = Field( ..., description=( "The explanation for the error as well as for the new question." "Can include the direct stack trace as well." ), ) from llama_index.core.bridge.pydantic import PrivateAttr class RetryAgentWorker(CustomSimpleAgentWorker): """Agent worker that adds a retry layer on top of a router. Continues iterating until there's no errors / task is done. """ prompt_str: str = Field(default=DEFAULT_PROMPT_STR) max_iterations: int = Field(default=10) _router_query_engine: RouterQueryEngine = PrivateAttr() def __init__(self, tools: List[BaseTool], **kwargs: Any) -> None: """Init params.""" for tool in tools: if not isinstance(tool, QueryEngineTool): raise ValueError( f"Tool {tool.metadata.name} is not a query engine tool." ) self._router_query_engine = RouterQueryEngine( selector=PydanticSingleSelector.from_defaults(), query_engine_tools=tools, verbose=kwargs.get("verbose", False), ) super().__init__( tools=tools, **kwargs, ) def _initialize_state(self, task: Task, **kwargs: Any) -> Dict[str, Any]: """Initialize state.""" return {"count": 0, "current_reasoning": []} def _run_step( self, state: Dict[str, Any], task: Task, input: Optional[str] = None ) -> Tuple[AgentChatResponse, bool]: """Run step. 
Returns: Tuple of (agent_response, is_done) """ if "new_input" not in state: new_input = task.input else: new_input = state["new_input"] response = self._router_query_engine.query(new_input) state["current_reasoning"].extend( [("user", new_input), ("assistant", str(response))] ) chat_prompt_tmpl = get_chat_prompt_template( self.prompt_str, state["current_reasoning"] ) llm_program = LLMTextCompletionProgram.from_defaults( output_parser=PydanticOutputParser(output_cls=ResponseEval), prompt=chat_prompt_tmpl, llm=self.llm, ) response_eval = llm_program( query_str=new_input, response_str=str(response) ) if not response_eval.has_error: is_done = True else: is_done = False state["new_input"] = response_eval.new_question if self.verbose: print(f"> Question: {new_input}") print(f"> Response: {response}") print(f"> Response eval: {response_eval.dict()}") return AgentChatResponse(response=str(response)), is_done def _finalize_task(self, state: Dict[str, Any], **kwargs) -> None: """Finalize task.""" pass from llama_index.core.tools import QueryEngineTool from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) from llama_index.core import SQLDatabase engine = create_engine("sqlite:///:memory:", future=True) metadata_obj = MetaData() table_name = "city_stats" city_stats_table = Table( table_name, metadata_obj, Column("city_name", String(16), primary_key=True), Column("population", Integer), Column("country", String(16), nullable=False), ) metadata_obj.create_all(engine) from sqlalchemy import insert rows = [ {"city_name": "Toronto", "population": 2930000, "country": "Canada"}, {"city_name": "Tokyo", "population": 13960000, "country": "Japan"}, {"city_name": "Berlin", "population": 3645000, "country": "Germany"}, ] for row in rows: stmt = insert(city_stats_table).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) from llama_index.core.query_engine import NLSQLTableQueryEngine sql_database = SQLDatabase(engine, include_tables=["city_stats"]) sql_query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["city_stats"], verbose=True ) sql_tool = QueryEngineTool.from_defaults( query_engine=sql_query_engine, description=( "Useful for translating a natural language query into a SQL query over" " a table containing: city_stats, containing the population/country of" " each city" ), ) from llama_index.readers.wikipedia import WikipediaReader from llama_index.core import VectorStoreIndex cities = ["Toronto", "Berlin", "Tokyo"] wiki_docs = WikipediaReader().load_data(pages=cities) vector_tools = [] for city, wiki_doc in zip(cities, wiki_docs): vector_index = VectorStoreIndex.from_documents([wiki_doc]) vector_query_engine = vector_index.as_query_engine() vector_tool = QueryEngineTool.from_defaults( query_engine=vector_query_engine, description=f"Useful for answering semantic questions about {city}", ) vector_tools.append(vector_tool) from llama_index.core.agent import AgentRunner from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-4") callback_manager = llm.callback_manager query_engine_tools = [sql_tool] + vector_tools agent_worker = RetryAgentWorker.from_tools( query_engine_tools, llm=llm, verbose=True, callback_manager=callback_manager, ) agent =
AgentRunner(agent_worker, callback_manager=callback_manager)
llama_index.core.agent.AgentRunner
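Once assembled, the agent can be exercised end to end; the question below is illustrative:

response = agent.chat("Which countries are each city from?")
print(str(response))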
get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-google') get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-google') get_ipython().run_line_magic('pip', 'install llama-index-response-synthesizers-google') get_ipython().run_line_magic('pip', 'install llama-index') get_ipython().run_line_magic('pip', 'install "google-ai-generativelanguage>=0.4,<=1.0"') get_ipython().run_line_magic('pip', 'install google-auth-oauthlib') from google.oauth2 import service_account from llama_index.vector_stores.google import set_google_config credentials = service_account.Credentials.from_service_account_file( "service_account_key.json", scopes=[ "https://www.googleapis.com/auth/generative-language.retriever", ], ) set_google_config(auth_credentials=credentials) get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") import llama_index.core.vector_stores.google.generativeai.genai_extension as genaix from typing import Iterable from random import randrange LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX = f"llama-index-colab" SESSION_CORPUS_ID_PREFIX = ( f"{LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX}-{randrange(1000000)}" ) def corpus_id(num_id: int) -> str: return f"{SESSION_CORPUS_ID_PREFIX}-{num_id}" SESSION_CORPUS_ID = corpus_id(1) def list_corpora() -> Iterable[genaix.Corpus]: client = genaix.build_semantic_retriever() yield from genaix.list_corpora(client=client) def delete_corpus(*, corpus_id: str) -> None: client = genaix.build_semantic_retriever() genaix.delete_corpus(corpus_id=corpus_id, client=client) def cleanup_colab_corpora(): for corpus in list_corpora(): if corpus.corpus_id.startswith(LLAMA_INDEX_COLAB_CORPUS_ID_PREFIX): try: delete_corpus(corpus_id=corpus.corpus_id) print(f"Deleted corpus {corpus.corpus_id}.") except Exception: pass cleanup_colab_corpora() from llama_index.core import SimpleDirectoryReader from llama_index.indices.managed.google import GoogleIndex from llama_index.core import Response import time index = GoogleIndex.create_corpus( corpus_id=SESSION_CORPUS_ID, display_name="My first corpus!" 
) print(f"Newly created corpus ID is {index.corpus_id}.") documents = SimpleDirectoryReader("./data/paul_graham/").load_data() index.insert_documents(documents) for corpus in list_corpora(): print(corpus) query_engine = index.as_query_engine() response = query_engine.query("What did Paul Graham do growing up?") assert isinstance(response, Response) print(f"Response is {response.response}") for cited_text in [node.text for node in response.source_nodes]: print(f"Cited text: {cited_text}") if response.metadata: print( f"Answerability: {response.metadata.get('answerable_probability', 0)}" ) index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID) query_engine = index.as_query_engine() response = query_engine.query("Which company did Paul Graham build?") assert isinstance(response, Response) print(f"Response is {response.response}") from llama_index.core.schema import NodeRelationship, RelatedNodeInfo, TextNode index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID) index.insert_nodes( [ TextNode( text="It was the best of times.", relationships={ NodeRelationship.SOURCE: RelatedNodeInfo( node_id="123", metadata={"file_name": "Tale of Two Cities"}, ) }, ), TextNode( text="It was the worst of times.", relationships={ NodeRelationship.SOURCE: RelatedNodeInfo( node_id="123", metadata={"file_name": "Tale of Two Cities"}, ) }, ), TextNode( text="Bugs Bunny: Wassup doc?", relationships={ NodeRelationship.SOURCE: RelatedNodeInfo( node_id="456", metadata={"file_name": "Bugs Bunny Adventure"}, ) }, ), ] ) from google.ai.generativelanguage import ( GenerateAnswerRequest, HarmCategory, SafetySetting, ) index = GoogleIndex.from_corpus(corpus_id=SESSION_CORPUS_ID) query_engine = index.as_query_engine( temperature=0.2, answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE, safety_setting=[ SafetySetting( category=HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold=SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, ), SafetySetting( category=HarmCategory.HARM_CATEGORY_VIOLENCE, threshold=SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH, ), ], ) response = query_engine.query("What was Bugs Bunny's favorite saying?") print(response) from llama_index.core import Response response = query_engine.query("What were Paul Graham's achievements?") assert isinstance(response, Response) print(f"Response is {response.response}") for cited_text in [node.text for node in response.source_nodes]: print(f"Cited text: {cited_text}") if response.metadata: print( f"Answerability: {response.metadata.get('answerable_probability', 0)}" ) from llama_index.llms.gemini import Gemini GEMINI_API_KEY = "" # @param {type:"string"} gemini = Gemini(api_key=GEMINI_API_KEY) from llama_index.response_synthesizers.google import GoogleTextSynthesizer from llama_index.vector_stores.google import GoogleVectorStore from llama_index.core import VectorStoreIndex from llama_index.core.postprocessor import LLMRerank from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.core.retrievers import VectorIndexRetriever store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID) index = VectorStoreIndex.from_vector_store( vector_store=store, ) response_synthesizer = GoogleTextSynthesizer.from_defaults( temperature=0.2, answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE, ) reranker = LLMRerank( top_n=10, llm=gemini, ) query_engine = RetrieverQueryEngine.from_args( retriever=VectorIndexRetriever( index=index, similarity_top_k=20, ), node_postprocessors=[reranker], 
response_synthesizer=response_synthesizer, ) response = query_engine.query("What were Paul Graham's achievements?") print(response) from llama_index.core.indices.query.query_transform.base import ( StepDecomposeQueryTransform, ) from llama_index.core.query_engine import MultiStepQueryEngine store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID) index = VectorStoreIndex.from_vector_store( vector_store=store, ) response_synthesizer = GoogleTextSynthesizer.from_defaults( temperature=0.2, answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE, ) single_step_query_engine = index.as_query_engine( similarity_top_k=10, response_synthesizer=response_synthesizer, ) step_decompose_transform = StepDecomposeQueryTransform( llm=gemini, verbose=True, ) query_engine = MultiStepQueryEngine( query_engine=single_step_query_engine, query_transform=step_decompose_transform, response_synthesizer=response_synthesizer, index_summary="Ask me anything.", num_steps=6, ) response = query_engine.query("What were Paul Graham's achievements?") print(response) from llama_index.core.indices.query.query_transform import HyDEQueryTransform from llama_index.core.query_engine import TransformQueryEngine store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID) index = VectorStoreIndex.from_vector_store( vector_store=store, ) response_synthesizer = GoogleTextSynthesizer.from_defaults( temperature=0.2, answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE, ) base_query_engine = index.as_query_engine( similarity_top_k=10, response_synthesizer=response_synthesizer, ) hyde = HyDEQueryTransform( llm=gemini, include_original=False, ) hyde_query_engine = TransformQueryEngine(base_query_engine, hyde) response = hyde_query_engine.query("What were Paul Graham's achievements?") print(response) store = GoogleVectorStore.from_corpus(corpus_id=SESSION_CORPUS_ID) index = VectorStoreIndex.from_vector_store( vector_store=store, ) response_synthesizer = GoogleTextSynthesizer.from_defaults( temperature=0.2, answer_style=GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE ) reranker = LLMRerank( top_n=10, llm=gemini, ) single_step_query_engine = index.as_query_engine( similarity_top_k=20, node_postprocessors=[reranker], response_synthesizer=response_synthesizer, ) hyde = HyDEQueryTransform( llm=gemini, include_original=False, ) hyde_query_engine =
TransformQueryEngine(single_step_query_engine, hyde)
llama_index.core.query_engine.TransformQueryEngine
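The combined HyDE-plus-rerank engine from this row's completion is then queried like any other engine:

response = hyde_query_engine.query("What were Paul Graham's achievements?")
print(response)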
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().system('pip install llama-index') import pandas as pd pd.set_option("display.max_rows", None) pd.set_option("display.max_columns", None) pd.set_option("display.width", None) pd.set_option("display.max_colwidth", None) get_ipython().system('wget "https://www.dropbox.com/scl/fi/mlaymdy1ni1ovyeykhhuk/tesla_2021_10k.htm?rlkey=qf9k4zn0ejrbm716j0gg7r802&dl=1" -O tesla_2021_10k.htm') get_ipython().system('wget "https://www.dropbox.com/scl/fi/rkw0u959yb4w8vlzz76sa/tesla_2020_10k.htm?rlkey=tfkdshswpoupav5tqigwz1mp7&dl=1" -O tesla_2020_10k.htm') from llama_index.readers.file import FlatReader from pathlib import Path reader = FlatReader() docs = reader.load_data(Path("./tesla_2020_10k.htm")) from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.readers.file import FlatReader from llama_index.core.node_parser import HTMLNodeParser, SentenceSplitter from llama_index.core.ingestion import IngestionPipeline from pathlib import Path import nest_asyncio nest_asyncio.apply() reader = FlatReader() docs = reader.load_data(Path("./tesla_2020_10k.htm")) pipeline = IngestionPipeline( documents=docs, transformations=[ HTMLNodeParser.from_defaults(), SentenceSplitter(chunk_size=1024, chunk_overlap=200), OpenAIEmbedding(), ], ) eval_nodes = pipeline.run(documents=docs) eval_llm = OpenAI(model="gpt-3.5-turbo") dataset_generator = DatasetGenerator( eval_nodes[:100], llm=eval_llm, show_progress=True, num_questions_per_chunk=3, ) eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=100) len(eval_dataset.qr_pairs) eval_dataset.save_json("data/tesla10k_eval_dataset.json") eval_dataset = QueryResponseDataset.from_json( "data/tesla10k_eval_dataset.json" ) eval_qs = eval_dataset.questions qr_pairs = eval_dataset.qr_pairs ref_response_strs = [r for (_, r) in qr_pairs] from llama_index.core.evaluation import ( CorrectnessEvaluator, SemanticSimilarityEvaluator, ) from llama_index.core.evaluation.eval_utils import ( get_responses, get_results_df, ) from llama_index.core.evaluation import BatchEvalRunner evaluator_c = CorrectnessEvaluator(llm=eval_llm) evaluator_s =
SemanticSimilarityEvaluator()
llama_index.core.evaluation.SemanticSimilarityEvaluator
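A hedged sketch of how these two evaluators are typically run in batch; `query_engine` is an assumption (any engine built over the same corpus), while `get_responses` and `BatchEvalRunner` come from the imports above:

pred_responses = get_responses(eval_qs, query_engine, show_progress=True)
batch_runner = BatchEvalRunner(
    {"correctness": evaluator_c, "semantic_similarity": evaluator_s},
    workers=2,
    show_progress=True,
)
eval_results = await batch_runner.aevaluate_responses(
    queries=eval_qs, responses=pred_responses, reference=ref_response_strs
)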
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-gemini') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().run_line_magic('pip', 'install llama-index-embeddings-gemini') get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini') get_ipython().system("pip install llama-index 'google-generativeai>=0.3.0' matplotlib qdrant_client") import os GOOGLE_API_KEY = "" # add your GOOGLE API key here os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY from pathlib import Path import random from typing import Optional def get_image_files( dir_path, sample: Optional[int] = 10 ): dir_path = Path(dir_path) image_paths = [] for image_path in dir_path.glob("*.jpg"): image_paths.append(image_path) random.shuffle(image_paths) if sample: return image_paths[:sample] else: return image_paths image_files = get_image_files("SROIE2019/test/img", sample=100) from pydantic import BaseModel, Field class ReceiptInfo(BaseModel): company: str = Field(..., description="Company name") date: str = Field(..., description="Date field in DD/MM/YYYY format") address: str = Field(..., description="Address") total: float = Field(..., description="total amount") currency: str = Field( ..., description="Currency of the country (in abbreviations)" ) summary: str = Field( ..., description="Extracted text summary of the receipt, including items purchased, the type of store, the location, and any other notable salient features (what does the purchase seem to be for?).", ) from llama_index.multi_modal_llms.gemini import GeminiMultiModal from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser prompt_template_str = """\ Can you summarize the image and return a response \ with the following JSON format: \ """ async def pydantic_gemini(output_class, image_documents, prompt_template_str): gemini_llm = GeminiMultiModal( api_key=GOOGLE_API_KEY, model_name="models/gemini-pro-vision" ) llm_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=PydanticOutputParser(output_class), image_documents=image_documents, prompt_template_str=prompt_template_str, multi_modal_llm=gemini_llm, verbose=True, ) response = await llm_program.acall() return response from llama_index.core import SimpleDirectoryReader from llama_index.core.async_utils import run_jobs async def aprocess_image_file(image_file): print(f"Image file: {image_file}") img_docs = SimpleDirectoryReader(input_files=[image_file]).load_data() output = await pydantic_gemini(ReceiptInfo, img_docs, prompt_template_str) return output async def aprocess_image_files(image_files): """Process metadata on image files.""" tasks = [] for image_file in image_files: task = aprocess_image_file(image_file) tasks.append(task) outputs = await run_jobs(tasks, show_progress=True, workers=5) return outputs outputs = await aprocess_image_files(image_files) outputs[4] from llama_index.core.schema import TextNode from typing import List def get_nodes_from_objs( objs: List[ReceiptInfo], image_files: List[str] ) -> List[TextNode]: """Get nodes from objects.""" nodes = [] for image_file, obj in zip(image_files, objs): node = TextNode( text=obj.summary, metadata={ "company": obj.company, "date": obj.date, "address": obj.address, "total": obj.total, "currency": obj.currency, "image_file": str(image_file), }, excluded_embed_metadata_keys=["image_file"], excluded_llm_metadata_keys=["image_file"], ) nodes.append(node) return nodes 
nodes = get_nodes_from_objs(outputs, image_files) print(nodes[0].get_content(metadata_mode="all")) import qdrant_client from llama_index.vector_stores.qdrant import QdrantVectorStore from llama_index.core import StorageContext from llama_index.core import VectorStoreIndex from llama_index.embeddings.gemini import GeminiEmbedding from llama_index.llms.gemini import Gemini from llama_index.core import Settings client = qdrant_client.QdrantClient(path="qdrant_gemini") vector_store = QdrantVectorStore(client=client, collection_name="collection") Settings.embed_model = GeminiEmbedding( model_name="models/embedding-001", api_key=GOOGLE_API_KEY ) Settings.llm = (
Gemini(api_key=GOOGLE_API_KEY)
llama_index.llms.gemini.Gemini
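With the Gemini LLM and embedding model configured, a plausible continuation indexes the receipt nodes into Qdrant and queries them; the question is illustrative:

storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex(nodes=nodes, storage_context=storage_context)
query_engine = index.as_query_engine(similarity_top_k=5)
response = query_engine.query("Tell me about some restaurant bills")
print(str(response))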
from llama_hub.semanticscholar.base import SemanticScholarReader import os import openai from llama_index.llms import OpenAI from llama_index.query_engine import CitationQueryEngine from llama_index import ( VectorStoreIndex, StorageContext, load_index_from_storage, ServiceContext, ) from llama_index.response.notebook_utils import display_response s2reader = SemanticScholarReader() openai.api_key = os.environ["OPENAI_API_KEY"] service_context = ServiceContext.from_defaults( llm=OpenAI(model="gpt-3.5-turbo", temperature=0) ) query_space = "large language models" full_text = True total_papers = 50 persist_dir = ( "./citation_" + query_space + "_" + str(total_papers) + "_" + str(full_text) ) if not os.path.exists(persist_dir): documents = s2reader.load_data(query_space, total_papers, full_text=full_text) index =
VectorStoreIndex.from_documents(documents, service_context=service_context)
llama_index.VectorStoreIndex.from_documents
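A plausible continuation in this row's legacy-namespace style, completing the `if not os.path.exists(persist_dir):` block above before building a citation query engine; the query string is illustrative:

    index.storage_context.persist(persist_dir=persist_dir)
else:
    index = load_index_from_storage(
        StorageContext.from_defaults(persist_dir=persist_dir),
        service_context=service_context,
    )
query_engine = CitationQueryEngine.from_args(
    index, similarity_top_k=3, citation_chunk_size=512
)
response = query_engine.query("What are the applications of large language models?")
display_response(response, show_source=True, source_length=100)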
get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().system('pip install llama-index') get_ipython().system('pip install duckdb duckdb-engine') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core import SQLDatabase, SimpleDirectoryReader, Document from llama_index.readers.wikipedia import WikipediaReader from llama_index.core.query_engine import NLSQLTableQueryEngine from llama_index.core.indices.struct_store import SQLTableRetrieverQueryEngine from IPython.display import Markdown, display from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("duckdb:///:memory:") metadata_obj = MetaData() table_name = "city_stats" city_stats_table = Table( table_name, metadata_obj, Column("city_name", String(16), primary_key=True), Column("population", Integer), Column("country", String(16), nullable=False), ) metadata_obj.create_all(engine) metadata_obj.tables.keys() from sqlalchemy import insert rows = [ {"city_name": "Toronto", "population": 2930000, "country": "Canada"}, {"city_name": "Tokyo", "population": 13960000, "country": "Japan"}, { "city_name": "Chicago", "population": 2679000, "country": "United States", }, {"city_name": "Seoul", "population": 9776000, "country": "South Korea"}, ] for row in rows: stmt = insert(city_stats_table).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) with engine.connect() as connection: cursor = connection.exec_driver_sql("SELECT * FROM city_stats") print(cursor.fetchall()) from llama_index.core import SQLDatabase sql_database = SQLDatabase(engine, include_tables=["city_stats"]) query_engine = NLSQLTableQueryEngine(sql_database) response = query_engine.query("Which city has the highest population?") str(response) response.metadata engine = create_engine("duckdb:///:memory:") metadata_obj = MetaData() table_name = "city_stats" city_stats_table = Table( table_name, metadata_obj, Column("city_name", String(16), primary_key=True), Column("population", Integer), Column("country", String(16), nullable=False), ) all_table_names = ["city_stats"] n = 100 for i in range(n): tmp_table_name = f"tmp_table_{i}" tmp_table = Table( tmp_table_name, metadata_obj, Column(f"tmp_field_{i}_1", String(16), primary_key=True), Column(f"tmp_field_{i}_2", Integer), Column(f"tmp_field_{i}_3", String(16), nullable=False), ) all_table_names.append(f"tmp_table_{i}") metadata_obj.create_all(engine) from sqlalchemy import insert rows = [ {"city_name": "Toronto", "population": 2930000, "country": "Canada"}, {"city_name": "Tokyo", "population": 13960000, "country": "Japan"}, { "city_name": "Chicago", "population": 2679000, "country": "United States", }, {"city_name": "Seoul", "population": 9776000, "country": "South Korea"}, ] for row in rows: stmt = insert(city_stats_table).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) sql_database = SQLDatabase(engine, include_tables=["city_stats"]) from llama_index.core.indices.struct_store import SQLTableRetrieverQueryEngine from llama_index.core.objects import ( SQLTableNodeMapping, ObjectIndex, SQLTableSchema, ) from llama_index.core import VectorStoreIndex table_node_mapping =
SQLTableNodeMapping(sql_database)
llama_index.core.objects.SQLTableNodeMapping
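The mapping is then typically wrapped in an ObjectIndex so the right table schema can be retrieved per query; a sketch reusing this row's `sql_database`:

table_schema_objs = [SQLTableSchema(table_name="city_stats")]
obj_index = ObjectIndex.from_objects(
    table_schema_objs,
    table_node_mapping,
    VectorStoreIndex,
)
query_engine = SQLTableRetrieverQueryEngine(
    sql_database, obj_index.as_retriever(similarity_top_k=1)
)
response = query_engine.query("Which city has the highest population?")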
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip') get_ipython().system('unzip data.zip') import pandas as pd from pathlib import Path data_dir = Path("./WikiTableQuestions/csv/200-csv") csv_files = sorted([f for f in data_dir.glob("*.csv")]) dfs = [] for csv_file in csv_files: print(f"processing file: {csv_file}") try: df = pd.read_csv(csv_file) dfs.append(df) except Exception as e: print(f"Error parsing {csv_file}: {str(e)}") tableinfo_dir = "WikiTableQuestions_TableInfo" get_ipython().system('mkdir {tableinfo_dir}') from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.llms.openai import OpenAI class TableInfo(BaseModel): """Information regarding a structured table.""" table_name: str = Field( ..., description="table name (must be underscores and NO spaces)" ) table_summary: str = Field( ..., description="short, concise summary/caption of the table" ) prompt_str = """\ Give me a summary of the table with the following JSON format. - The table name must be unique to the table and describe it while being concise. - Do NOT output a generic table name (e.g. table, my_table). Do NOT make the table name one of the following: {exclude_table_name_list} Table: {table_str} Summary: """ program = LLMTextCompletionProgram.from_defaults( output_cls=TableInfo, llm=OpenAI(model="gpt-3.5-turbo"), prompt_template_str=prompt_str, ) import json from typing import Optional def _get_tableinfo_with_index(idx: int) -> Optional[TableInfo]: results_gen = Path(tableinfo_dir).glob(f"{idx}_*") results_list = list(results_gen) if len(results_list) == 0: return None elif len(results_list) == 1: path = results_list[0] return TableInfo.parse_file(path) else: raise ValueError( f"More than one file matching index: {results_list}" ) table_names = set() table_infos = [] for idx, df in enumerate(dfs): table_info = _get_tableinfo_with_index(idx) if table_info: table_infos.append(table_info) else: while True: df_str = df.head(10).to_csv() table_info = program( table_str=df_str, exclude_table_name_list=str(list(table_names)), ) table_name = table_info.table_name print(f"Processed table: {table_name}") if table_name not in table_names: table_names.add(table_name) break else: print(f"Table name {table_name} already exists, trying again.") out_file = f"{tableinfo_dir}/{idx}_{table_name}.json" json.dump(table_info.dict(), open(out_file, "w")) table_infos.append(table_info) from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, ) import re def sanitize_column_name(col_name): return re.sub(r"\W+", "_", col_name) def create_table_from_dataframe( df: pd.DataFrame, table_name: str, engine, metadata_obj ): sanitized_columns = {col: sanitize_column_name(col) for col in df.columns} df = df.rename(columns=sanitized_columns) columns = [ Column(col, String if dtype == "object" else Integer) for col, dtype in zip(df.columns, df.dtypes) ] table = Table(table_name, metadata_obj, *columns) metadata_obj.create_all(engine) with engine.connect() as conn: for _, row in df.iterrows(): insert_stmt = table.insert().values(**row.to_dict()) conn.execute(insert_stmt) conn.commit() engine = create_engine("sqlite:///:memory:") metadata_obj = MetaData() for idx, df in enumerate(dfs): tableinfo = _get_tableinfo_with_index(idx) print(f"Creating table: {tableinfo.table_name}") create_table_from_dataframe(df, 
tableinfo.table_name, engine, metadata_obj) import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") from llama_index.core.objects import ( SQLTableNodeMapping, ObjectIndex, SQLTableSchema, ) from llama_index.core import SQLDatabase, VectorStoreIndex sql_database = SQLDatabase(engine) table_node_mapping = SQLTableNodeMapping(sql_database) table_schema_objs = [ SQLTableSchema(table_name=t.table_name, context_str=t.table_summary) for t in table_infos ] # add a SQLTableSchema for each table obj_index = ObjectIndex.from_objects( table_schema_objs, table_node_mapping, VectorStoreIndex, ) obj_retriever = obj_index.as_retriever(similarity_top_k=3) from llama_index.core.retrievers import SQLRetriever from typing import List from llama_index.core.query_pipeline import FnComponent sql_retriever =
SQLRetriever(sql_database)
llama_index.core.retrievers.SQLRetriever
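One way the retrieved table schemas can be rendered into prompt context for a text-to-SQL pipeline; a sketch using `SQLDatabase.get_single_table_info` and the `FnComponent` imported above:

def get_table_context_str(table_schema_objs: List[SQLTableSchema]) -> str:
    """Render retrieved table schemas (plus optional summaries) as context."""
    context_strs = []
    for table_schema_obj in table_schema_objs:
        table_info = sql_database.get_single_table_info(
            table_schema_obj.table_name
        )
        if table_schema_obj.context_str:
            table_info += (
                " The table description is: " + table_schema_obj.context_str
            )
        context_strs.append(table_info)
    return "\n\n".join(context_strs)

table_parser_component = FnComponent(fn=get_table_context_str)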
get_ipython().run_line_magic('pip', 'install llama-index-finetuning') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-finetuning-callbacks') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-program-openai') import nest_asyncio nest_asyncio.apply() import os import openai os.environ["OPENAI_API_KEY"] = "sk-..." openai.api_key = os.environ["OPENAI_API_KEY"] from llama_index.program.openai import OpenAIPydanticProgram from pydantic import BaseModel from llama_index.llms.openai import OpenAI from llama_index.finetuning.callbacks import OpenAIFineTuningHandler from llama_index.core.callbacks import CallbackManager from typing import List class Song(BaseModel): """Data model for a song.""" title: str length_seconds: int class Album(BaseModel): """Data model for an album.""" name: str artist: str songs: List[Song] finetuning_handler = OpenAIFineTuningHandler() callback_manager = CallbackManager([finetuning_handler]) llm = OpenAI(model="gpt-4", callback_manager=callback_manager) prompt_template_str = """\ Generate an example album, with an artist and a list of songs. \ Using the movie {movie_name} as inspiration.\ """ program = OpenAIPydanticProgram.from_defaults( output_cls=Album, prompt_template_str=prompt_template_str, llm=llm, verbose=False, ) movie_names = [ "The Shining", "The Departed", "Titanic", "Goodfellas", "Pretty Woman", "Home Alone", "Caged Fury", "Edward Scissorhands", "Total Recall", "Ghost", "Tremors", "RoboCop", "Rocky V", ] from tqdm.notebook import tqdm for movie_name in tqdm(movie_names): output = program(movie_name=movie_name) print(output.json()) finetuning_handler.save_finetuning_events("mock_finetune_songs.jsonl") get_ipython().system('cat mock_finetune_songs.jsonl') from llama_index.finetuning import OpenAIFinetuneEngine finetune_engine = OpenAIFinetuneEngine( "gpt-3.5-turbo", "mock_finetune_songs.jsonl", validate_json=False, # openai validate json code doesn't support function calling yet ) finetune_engine.finetune() finetune_engine.get_current_job() ft_llm = finetune_engine.get_finetuned_model(temperature=0.3) ft_program = OpenAIPydanticProgram.from_defaults( output_cls=Album, prompt_template_str=prompt_template_str, llm=ft_llm, verbose=False, ) ft_program(movie_name="Goodfellas") get_ipython().system('mkdir data && wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pydantic import Field from typing import List class Citation(BaseModel): """Citation class.""" author: str = Field( ..., description="Inferred first author (usually last name)" ) year: int = Field(..., description="Inferred year") desc: str = Field( ..., description=( "Inferred description from the text of the work that the author is" " cited for" ), ) class Response(BaseModel): """List of author citations. Extracted over unstructured text. """ citations: List[Citation] = Field( ..., description=( "List of author citations (organized by author, year, and" " description)." 
), ) from llama_index.readers.file import PyMuPDFReader from llama_index.core import Document from llama_index.core.node_parser import SentenceSplitter from pathlib import Path loader = PyMuPDFReader() docs0 = loader.load(file_path=Path("./data/llama2.pdf")) doc_text = "\n\n".join([d.get_content() for d in docs0]) metadata = { "paper_title": "Llama 2: Open Foundation and Fine-Tuned Chat Models" } docs = [Document(text=doc_text, metadata=metadata)] chunk_size = 1024 node_parser = SentenceSplitter(chunk_size=chunk_size) nodes = node_parser.get_nodes_from_documents(docs) len(nodes) from llama_index.core import Settings finetuning_handler =
OpenAIFineTuningHandler()
llama_index.finetuning.callbacks.OpenAIFineTuningHandler
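The handler is then typically wired into the callbacks, mirroring the pattern used earlier in this same prompt; the GPT-4 teacher model below is an assumption:

callback_manager = CallbackManager([finetuning_handler])
Settings.callback_manager = callback_manager
Settings.llm = OpenAI(model="gpt-4", temperature=0.3)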
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-web') get_ipython().run_line_magic('pip', 'install llama-index-tools-google') from llama_index.readers.web import SimpleWebPageReader reader = SimpleWebPageReader(html_to_text=True) docs = reader.load_data(urls=["https://eugeneyan.com/writing/llm-patterns/"]) print(docs[0].get_content()[:400]) from llama_index.core import VectorStoreIndex index = VectorStoreIndex.from_documents(docs) query_engine = index.as_query_engine() response = query_engine.query("What are ways to evaluate LLMs?") print(str(response)) from llama_index.tools.google import GmailToolSpec tool_spec =
GmailToolSpec()
llama_index.tools.google.GmailToolSpec
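A sketch of turning the tool spec into agent tools; `OpenAIAgent` is the usual consumer here, and the chat message is illustrative:

from llama_index.agent.openai import OpenAIAgent

tools = tool_spec.to_tool_list()
agent = OpenAIAgent.from_tools(tools, verbose=True)
response = agent.chat("Can you draft an email summarizing ways to evaluate LLMs?")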
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-qdrant') get_ipython().run_line_magic('pip', 'install llama_index ftfy regex tqdm') get_ipython().run_line_magic('pip', 'install git+https://github.com/openai/CLIP.git') get_ipython().run_line_magic('pip', 'install torch torchvision') get_ipython().run_line_magic('pip', 'install matplotlib scikit-image') get_ipython().run_line_magic('pip', 'install -U qdrant_client') import os OPENAI_API_TOKEN = "sk-" os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN import wikipedia import urllib.request from pathlib import Path image_path = Path("mixed_wiki") image_uuid = 0 image_metadata_dict = {} MAX_IMAGES_PER_WIKI = 30 wiki_titles = [ "Vincent van Gogh", "San Francisco", "Batman", "iPhone", "Tesla Model S", "BTS band", ] if not image_path.exists(): Path.mkdir(image_path) for title in wiki_titles: images_per_wiki = 0 print(title) try: page_py = wikipedia.page(title) list_img_urls = page_py.images for url in list_img_urls: if url.endswith(".jpg") or url.endswith(".png"): image_uuid += 1 image_file_name = title + "_" + url.split("/")[-1] image_metadata_dict[image_uuid] = { "filename": image_file_name, "img_path": "./" + str(image_path / f"{image_uuid}.jpg"), } urllib.request.urlretrieve( url, image_path / f"{image_uuid}.jpg" ) images_per_wiki += 1 if images_per_wiki >= MAX_IMAGES_PER_WIKI: break except Exception: print(f"No images found for Wikipedia page: {title}") continue from PIL import Image import matplotlib.pyplot as plt import os image_paths = [] for img_path in os.listdir("./mixed_wiki"): image_paths.append(str(os.path.join("./mixed_wiki", img_path))) def plot_images(image_paths): images_shown = 0 plt.figure(figsize=(16, 9)) for img_path in image_paths: if os.path.isfile(img_path): image = Image.open(img_path) plt.subplot(3, 3, images_shown + 1) plt.imshow(image) plt.xticks([]) plt.yticks([]) images_shown += 1 if images_shown >= 9: break plot_images(image_paths) from llama_index.core.indices import MultiModalVectorStoreIndex from llama_index.vector_stores.qdrant import QdrantVectorStore from llama_index.core import SimpleDirectoryReader, StorageContext import qdrant_client from llama_index.core import SimpleDirectoryReader client = qdrant_client.QdrantClient(path="qdrant_img_db") text_store = QdrantVectorStore( client=client, collection_name="text_collection" ) image_store = QdrantVectorStore( client=client, collection_name="image_collection" ) storage_context = StorageContext.from_defaults( vector_store=text_store, image_store=image_store ) documents = SimpleDirectoryReader("./mixed_wiki/").load_data() index = MultiModalVectorStoreIndex.from_documents( documents, storage_context=storage_context, ) input_image = "./mixed_wiki/2.jpg" plot_images([input_image]) retriever_engine = index.as_retriever(image_similarity_top_k=4) retrieval_results = retriever_engine.image_to_image_retrieve( "./mixed_wiki/2.jpg" ) retrieved_images = [] for res in retrieval_results: retrieved_images.append(res.node.metadata["file_path"]) plot_images(retrieved_images[1:]) from llama_index.multi_modal_llms.openai import OpenAIMultiModal from llama_index.core import SimpleDirectoryReader from llama_index.core.schema import ImageDocument image_documents = [
ImageDocument(image_path=input_image)
llama_index.core.schema.ImageDocument
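Assuming the list above is closed as `image_documents = [ImageDocument(image_path=input_image)]`, a multi-modal completion over it might look like:

openai_mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview", max_new_tokens=300)
response = openai_mm_llm.complete(
    prompt="Describe the image as an alternative text",
    image_documents=image_documents,
)
print(response)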
get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-multi-modal-llms-replicate') import os OPENAI_API_TOKEN = "sk-<your-openai-api-token>" os.environ["OPENAI_API_KEY"] = OPENAI_API_TOKEN REPLICATE_API_TOKEN = "" # Your Replicate API token here os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN from pathlib import Path input_image_path = Path("restaurant_images") if not input_image_path.exists(): Path.mkdir(input_image_path) get_ipython().system('wget "https://docs.google.com/uc?export=download&id=1GlqcNJhGGbwLKjJK1QJ_nyswCTQ2K2Fq" -O ./restaurant_images/fried_chicken.png') from pydantic import BaseModel class Restaurant(BaseModel): """Data model for a restaurant.""" restaurant: str food: str discount: str price: str rating: str review: str from llama_index.multi_modal_llms.openai import OpenAIMultiModal from llama_index.core import SimpleDirectoryReader image_documents = SimpleDirectoryReader("./restaurant_images").load_data() openai_mm_llm = OpenAIMultiModal( model="gpt-4-vision-preview", api_key=OPENAI_API_TOKEN, max_new_tokens=1000 ) from PIL import Image import matplotlib.pyplot as plt imageUrl = "./restaurant_images/fried_chicken.png" image = Image.open(imageUrl).convert("RGB") plt.figure(figsize=(16, 5)) plt.imshow(image) from llama_index.core.program import MultiModalLLMCompletionProgram from llama_index.core.output_parsers import PydanticOutputParser prompt_template_str = """\ Can you summarize what is in the image\ and return the answer with JSON format \ """ openai_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=
PydanticOutputParser(Restaurant)
llama_index.core.output_parsers.PydanticOutputParser
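For reference, a hedged sketch of the remaining `from_defaults` arguments and a call, reusing `image_documents`, `prompt_template_str`, and `openai_mm_llm` from the prompt above:

openai_program = MultiModalLLMCompletionProgram.from_defaults(
    output_parser=PydanticOutputParser(Restaurant),
    image_documents=image_documents,
    prompt_template_str=prompt_template_str,
    multi_modal_llm=openai_mm_llm,
    verbose=True,
)
response = openai_program()
print(response)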
get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-vector-stores-pinecone') get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() get_ipython().system("mkdir -p 'data/paul_graham/'") get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'") from llama_index.core import SimpleDirectoryReader documents = SimpleDirectoryReader("./data/paul_graham/").load_data() from llama_index.llms.openai import OpenAI from llama_index.core import Settings from llama_index.core import StorageContext, VectorStoreIndex from llama_index.core import SummaryIndex Settings.llm = OpenAI() Settings.chunk_size = 1024 nodes = Settings.node_parser.get_nodes_from_documents(documents) storage_context = StorageContext.from_defaults() storage_context.docstore.add_documents(nodes) summary_index = SummaryIndex(nodes, storage_context=storage_context) vector_index = VectorStoreIndex(nodes, storage_context=storage_context) summary_query_engine = summary_index.as_query_engine( response_mode="tree_summarize", use_async=True, ) vector_query_engine = vector_index.as_query_engine() from llama_index.core.tools import QueryEngineTool summary_tool = QueryEngineTool.from_defaults( query_engine=summary_query_engine, name="summary_tool", description=( "Useful for summarization questions related to the author's life" ), ) vector_tool = QueryEngineTool.from_defaults( query_engine=vector_query_engine, name="vector_tool", description=( "Useful for retrieving specific context to answer specific questions about the author's life" ), ) from llama_index.agent.openai import OpenAIAssistantAgent agent = OpenAIAssistantAgent.from_new( name="QA bot", instructions="You are a bot designed to answer questions about the author", openai_tools=[], tools=[summary_tool, vector_tool], verbose=True, run_retrieve_sleep_time=1.0, ) response = agent.chat("Can you give me a summary about the author's life?") print(str(response)) response = agent.query("What did the author do after RISD?") print(str(response)) import pinecone import os api_key = os.environ["PINECONE_API_KEY"] pinecone.init(api_key=api_key, environment="us-west1-gcp") try: pinecone.create_index( "quickstart", dimension=1536, metric="euclidean", pod_type="p1" ) except Exception: pass pinecone_index = pinecone.Index("quickstart") pinecone_index.delete(deleteAll=True, namespace="test") from llama_index.core import VectorStoreIndex, StorageContext from llama_index.vector_stores.pinecone import PineconeVectorStore from llama_index.core.schema import TextNode nodes = [ TextNode( text=( "Michael Jordan is a retired professional basketball player," " widely regarded as one of the greatest basketball players of all" " time." ), metadata={ "category": "Sports", "country": "United States", }, ), TextNode( text=( "Angelina Jolie is an American actress, filmmaker, and" " humanitarian. She has received numerous awards for her acting" " and is known for her philanthropic work." ), metadata={ "category": "Entertainment", "country": "United States", }, ), TextNode( text=( "Elon Musk is a business magnate, industrial designer, and" " engineer. 
He is the founder, CEO, and lead designer of SpaceX," " Tesla, Inc., Neuralink, and The Boring Company." ), metadata={ "category": "Business", "country": "United States", }, ), TextNode( text=( "Rihanna is a Barbadian singer, actress, and businesswoman. She" " has achieved significant success in the music industry and is" " known for her versatile musical style." ), metadata={ "category": "Music", "country": "Barbados", }, ), TextNode( text=( "Cristiano Ronaldo is a Portuguese professional footballer who is" " considered one of the greatest football players of all time. He" " has won numerous awards and set multiple records during his" " career." ), metadata={ "category": "Sports", "country": "Portugal", }, ), ] vector_store = PineconeVectorStore( pinecone_index=pinecone_index, namespace="test" ) storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex(nodes, storage_context=storage_context) from llama_index.core.tools import FunctionTool from llama_index.core.vector_stores import ( VectorStoreInfo, MetadataInfo, ExactMatchFilter, MetadataFilters, ) from llama_index.core.retrievers import VectorIndexRetriever from llama_index.core.query_engine import RetrieverQueryEngine from typing import List, Tuple, Any from pydantic import BaseModel, Field top_k = 3 vector_store_info = VectorStoreInfo( content_info="brief biography of celebrities", metadata_info=[ MetadataInfo( name="category", type="str", description=( "Category of the celebrity, one of [Sports, Entertainment," " Business, Music]" ), ), MetadataInfo( name="country", type="str", description=( "Country of the celebrity, one of [United States, Barbados," " Portugal]" ), ), ], ) class AutoRetrieveModel(BaseModel): query: str = Field(..., description="natural language query string") filter_key_list: List[str] = Field( ..., description="List of metadata filter field names" ) filter_value_list: List[str] = Field( ..., description=( "List of metadata filter field values (corresponding to names" " specified in filter_key_list)" ), ) def auto_retrieve_fn( query: str, filter_key_list: List[str], filter_value_list: List[str] ): """Auto retrieval function. Performs auto-retrieval from a vector database, and then applies a set of filters. """ query = query or "Query" exact_match_filters = [ ExactMatchFilter(key=k, value=v) for k, v in zip(filter_key_list, filter_value_list) ] retriever = VectorIndexRetriever( index, filters=MetadataFilters(filters=exact_match_filters), similarity_top_k=top_k, ) results = retriever.retrieve(query) return [r.get_content() for r in results] description = f"""\ Use this tool to look up biographical information about celebrities. The vector database schema is given below: {vector_store_info.json()} """ auto_retrieve_tool = FunctionTool.from_defaults( fn=auto_retrieve_fn, name="celebrity_bios", description=description, fn_schema=AutoRetrieveModel, ) auto_retrieve_fn( "celebrity from the United States", filter_key_list=["country"], filter_value_list=["United States"], ) from llama_index.agent.openai import OpenAIAssistantAgent agent = OpenAIAssistantAgent.from_new( name="Celebrity bot", instructions="You are a bot designed to answer questions about celebrities.", tools=[auto_retrieve_tool], verbose=True, ) response = agent.chat("Tell me about two celebrities from the United States. 
") print(str(response)) from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) from llama_index.core import SQLDatabase from llama_index.core.indices import SQLStructStoreIndex engine = create_engine("sqlite:///:memory:", future=True) metadata_obj = MetaData() table_name = "city_stats" city_stats_table = Table( table_name, metadata_obj, Column("city_name", String(16), primary_key=True), Column("population", Integer), Column("country", String(16), nullable=False), ) metadata_obj.create_all(engine) metadata_obj.tables.keys() from sqlalchemy import insert rows = [ {"city_name": "Toronto", "population": 2930000, "country": "Canada"}, {"city_name": "Tokyo", "population": 13960000, "country": "Japan"}, {"city_name": "Berlin", "population": 3645000, "country": "Germany"}, ] for row in rows: stmt = insert(city_stats_table).values(**row) with engine.begin() as connection: cursor = connection.execute(stmt) with engine.connect() as connection: cursor = connection.exec_driver_sql("SELECT * FROM city_stats") print(cursor.fetchall()) sql_database = SQLDatabase(engine, include_tables=["city_stats"]) from llama_index.core.query_engine import NLSQLTableQueryEngine query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["city_stats"], ) get_ipython().system('pip install wikipedia') from llama_index.readers.wikipedia import WikipediaReader from llama_index.core import SimpleDirectoryReader, VectorStoreIndex cities = ["Toronto", "Berlin", "Tokyo"] wiki_docs =
WikipediaReader()
llama_index.readers.wikipedia.WikipediaReader
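The completed reader call loads one document per city; a common follow-up (sketched here) builds a per-city vector query engine:

wiki_docs = WikipediaReader().load_data(pages=cities)
city_query_engines = {
    city: VectorStoreIndex.from_documents([doc]).as_query_engine()
    for city, doc in zip(cities, wiki_docs)
}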
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-indices-managed-vectara') get_ipython().system('pip install llama-index') import logging import sys logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) from llama_index.core.schema import TextNode from llama_index.core.indices.managed.types import ManagedIndexQueryMode from llama_index.indices.managed.vectara import VectaraIndex from llama_index.indices.managed.vectara import VectaraAutoRetriever from llama_index.core.vector_stores import MetadataInfo, VectorStoreInfo from llama_index.llms.openai import OpenAI nodes = [ TextNode( text=( "A pragmatic paleontologist touring an almost complete theme park on an island " + "in Central America is tasked with protecting a couple of kids after a power " + "failure causes the park's cloned dinosaurs to run loose." ), metadata={"year": 1993, "rating": 7.7, "genre": "science fiction"}, ), TextNode( text=( "A thief who steals corporate secrets through the use of dream-sharing technology " + "is given the inverse task of planting an idea into the mind of a C.E.O., " + "but his tragic past may doom the project and his team to disaster." ), metadata={ "year": 2010, "director": "Christopher Nolan", "rating": 8.2, }, ), TextNode( text="Barbie suffers a crisis that leads her to question her world and her existence.", metadata={ "year": 2023, "director": "Greta Gerwig", "genre": "fantasy", "rating": 9.5, }, ),
TextNode( text=( "A cowboy doll is profoundly threatened and jealous when a new spaceman action " + "figure supplants him as top toy in a boy's bedroom." )
llama_index.core.schema.TextNode
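A hedged sketch of indexing these nodes with Vectara and auto-retrieving over the movie metadata; this assumes VECTARA_CUSTOMER_ID, VECTARA_CORPUS_ID, and VECTARA_API_KEY are set in the environment:

index = VectaraIndex(nodes=nodes)
vector_store_info = VectorStoreInfo(
    content_info="information about a movie",
    metadata_info=[
        MetadataInfo(
            name="year", type="integer",
            description="The year the movie was released",
        ),
        MetadataInfo(
            name="genre", type="string",
            description="The genre of the movie",
        ),
    ],
)
retriever = VectaraAutoRetriever(
    index,
    vector_store_info=vector_store_info,
    llm=OpenAI(model="gpt-4", temperature=0),
)
retriever.retrieve("movie directed by Greta Gerwig")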
get_ipython().run_line_magic('pip', 'install llama-index-question-gen-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') from IPython.display import Markdown, display def display_prompt_dict(prompts_dict): for k, p in prompts_dict.items(): text_md = f"**Prompt Key**: {k}<br>" f"**Text:** <br>" display(Markdown(text_md)) print(p.get_template()) display(Markdown("<br><br>")) from llama_index.core.selectors import LLMSingleSelector, LLMMultiSelector from llama_index.core.selectors import ( PydanticMultiSelector, PydanticSingleSelector, ) selector = LLMMultiSelector.from_defaults() from llama_index.core.tools import ToolMetadata tool_choices = [ ToolMetadata( name="covid_nyt", description=("This tool contains a NYT news article about COVID-19"), ), ToolMetadata( name="covid_wiki", description=("This tool contains the Wikipedia page about COVID-19"), ), ToolMetadata( name="covid_tesla", description=("This tool contains the Wikipedia page about apples"), ), ] display_prompt_dict(selector.get_prompts()) selector_result = selector.select( tool_choices, query="Tell me more about COVID-19" ) selector_result.selections from llama_index.core import PromptTemplate from llama_index.llms.openai import OpenAI query_gen_str = """\ You are a helpful assistant that generates multiple search queries based on a \ single input query. Generate {num_queries} search queries, one on each line, \ related to the following input query: Query: {query} Queries: """ query_gen_prompt = PromptTemplate(query_gen_str) llm = OpenAI(model="gpt-3.5-turbo") def generate_queries(query: str, llm, num_queries: int = 4): response = llm.predict( query_gen_prompt, num_queries=num_queries, query=query ) queries = response.split("\n") queries_str = "\n".join(queries) print(f"Generated queries:\n{queries_str}") return queries queries = generate_queries("What happened at Interleaf and Viaweb?", llm) queries from llama_index.core.indices.query.query_transform import HyDEQueryTransform from llama_index.llms.openai import OpenAI hyde = HyDEQueryTransform(include_original=True) llm = OpenAI(model="gpt-3.5-turbo") query_bundle = hyde.run("What is Bel?") query_bundle.custom_embedding_strs from llama_index.core.question_gen import LLMQuestionGenerator from llama_index.question_gen.openai import OpenAIQuestionGenerator from llama_index.llms.openai import OpenAI llm = OpenAI() question_gen = OpenAIQuestionGenerator.from_defaults(llm=llm) display_prompt_dict(question_gen.get_prompts()) from llama_index.core.tools import ToolMetadata tool_choices = [ ToolMetadata( name="uber_2021_10k", description=( "Provides information about Uber financials for year 2021" ), ), ToolMetadata( name="lyft_2021_10k", description=( "Provides information about Lyft financials for year 2021" ), ), ] from llama_index.core import QueryBundle query_str = "Compare and contrast Uber and Lyft" choices = question_gen.generate(tool_choices, QueryBundle(query_str=query_str)) choices from llama_index.core.agent import ReActChatFormatter from llama_index.core.agent.react.output_parser import ReActOutputParser from llama_index.core.tools import FunctionTool from llama_index.core.llms import ChatMessage def execute_sql(sql: str) -> str: """Given a SQL input string, execute it.""" return f"Executed {sql}" def add(a: int, b: int) -> int: """Add two numbers.""" return a + b tool1 = FunctionTool.from_defaults(fn=execute_sql) tool2 = FunctionTool.from_defaults(fn=add) tools = [tool1, tool2] chat_formatter =
ReActChatFormatter()
llama_index.core.agent.ReActChatFormatter
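For context, a hedged sketch of using the ReActChatFormatter completed above: format() renders the ReAct system header (with the execute_sql and add tool descriptions) plus the chat history into ChatMessages. The user question is illustrative:

from llama_index.core.agent import ReActChatFormatter
from llama_index.core.llms import ChatMessage, MessageRole

chat_formatter = ReActChatFormatter()
input_msgs = chat_formatter.format(
    tools,  # the FunctionTool list defined in the prompt above
    chat_history=[ChatMessage(role=MessageRole.USER, content="What is 3 + 4?")],
    current_reasoning=[],  # no prior Thought/Action/Observation steps yet
)
print(input_msgs[0].content)  # system prompt listing the available tools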
get_ipython().system('pip install llama-index-llms-dashscope') get_ipython().run_line_magic('env', 'DASHSCOPE_API_KEY=YOUR_DASHSCOPE_API_KEY') import os os.environ["DASHSCOPE_API_KEY"] = "YOUR_DASHSCOPE_API_KEY" from llama_index.llms.dashscope import DashScope, DashScopeGenerationModels dashscope_llm =
DashScope(model_name=DashScopeGenerationModels.QWEN_MAX)
llama_index.llms.dashscope.DashScope
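A minimal usage sketch for the DashScope LLM constructed above, assuming a valid DASHSCOPE_API_KEY; the prompts are illustrative:

from llama_index.core.llms import ChatMessage, MessageRole

# One-shot completion.
print(dashscope_llm.complete("Who are you?").text)

# Multi-turn chat.
messages = [
    ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
    ChatMessage(role=MessageRole.USER, content="Tell me a joke."),
]
print(dashscope_llm.chat(messages).message.content)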
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('load_ext', 'autoreload') get_ipython().run_line_magic('autoreload', '2') get_ipython().run_line_magic('env', 'OPENAI_API_KEY=') get_ipython().run_line_magic('env', 'BRAINTRUST_API_KEY=') get_ipython().run_line_magic('env', 'TOKENIZERS_PARALLELISM=true # This is needed to avoid a warning message from Chroma') get_ipython().run_line_magic('pip', 'install -U llama_hub llama_index braintrust autoevals pypdf pillow transformers torch torchvision') get_ipython().system('mkdir data') get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.readers.file import PDFReader from llama_index.core.response.notebook_utils import display_source_node from llama_index.core.retrievers import RecursiveRetriever from llama_index.core.query_engine import RetrieverQueryEngine from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI import json loader = PDFReader() docs0 = loader.load_data(file=Path("./data/llama2.pdf")) from llama_index.core import Document doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode node_parser = SentenceSplitter(chunk_size=1024) base_nodes = node_parser.get_nodes_from_documents(docs) for idx, node in enumerate(base_nodes): node.id_ = f"node-{idx}" from llama_index.core.embeddings import resolve_embed_model embed_model = resolve_embed_model("local:BAAI/bge-small-en") llm = OpenAI(model="gpt-3.5-turbo") base_index = VectorStoreIndex(base_nodes, embed_model=embed_model) base_retriever = base_index.as_retriever(similarity_top_k=2) retrievals = base_retriever.retrieve( "Can you tell me about the key concepts for safety finetuning" ) for n in retrievals: display_source_node(n, source_length=1500) query_engine_base = RetrieverQueryEngine.from_args(base_retriever, llm=llm) response = query_engine_base.query( "Can you tell me about the key concepts for safety finetuning" ) print(str(response)) sub_chunk_sizes = [128, 256, 512] sub_node_parsers = [SentenceSplitter(chunk_size=c) for c in sub_chunk_sizes] all_nodes = [] for base_node in base_nodes: for n in sub_node_parsers: sub_nodes = n.get_nodes_from_documents([base_node]) sub_inodes = [ IndexNode.from_text_node(sn, base_node.node_id) for sn in sub_nodes ] all_nodes.extend(sub_inodes) original_node = IndexNode.from_text_node(base_node, base_node.node_id) all_nodes.append(original_node) all_nodes_dict = {n.node_id: n for n in all_nodes} vector_index_chunk = VectorStoreIndex(all_nodes, embed_model=embed_model) vector_retriever_chunk = vector_index_chunk.as_retriever(similarity_top_k=2) retriever_chunk = RecursiveRetriever( "vector", retriever_dict={"vector": vector_retriever_chunk}, node_dict=all_nodes_dict, verbose=True, ) nodes = retriever_chunk.retrieve( "Can you tell me about the key concepts for safety finetuning" ) for node in nodes: display_source_node(node, source_length=2000) query_engine_chunk = RetrieverQueryEngine.from_args(retriever_chunk, llm=llm) response = query_engine_chunk.query( "Can you tell me about the key concepts for safety finetuning" ) print(str(response)) from llama_index.core.node_parser import SentenceSplitter from llama_index.core.schema import IndexNode from 
llama_index.core.extractors import ( SummaryExtractor, QuestionsAnsweredExtractor, ) extractors = [ SummaryExtractor(summaries=["self"], show_progress=True), QuestionsAnsweredExtractor(questions=5, show_progress=True), ] metadata_dicts = [{} for _ in base_nodes] for extractor in extractors: for cur_dict, extracted in zip(metadata_dicts, extractor.extract(base_nodes)): cur_dict.update(extracted) def save_metadata_dicts(path): with open(path, "w") as fp: for m in metadata_dicts: fp.write(json.dumps(m) + "\n") def load_metadata_dicts(path): with open(path, "r") as fp: metadata_dicts = [json.loads(l) for l in fp.readlines()] return metadata_dicts save_metadata_dicts("data/llama2_metadata_dicts.jsonl") metadata_dicts = load_metadata_dicts("data/llama2_metadata_dicts.jsonl") import copy all_nodes = copy.deepcopy(base_nodes) for idx, d in enumerate(metadata_dicts): inode_q = IndexNode( text=d["questions_this_excerpt_can_answer"], index_id=base_nodes[idx].node_id, ) inode_s = IndexNode( text=d["section_summary"], index_id=base_nodes[idx].node_id ) all_nodes.extend([inode_q, inode_s]) all_nodes_dict = {n.node_id: n for n in all_nodes} from llama_index.core import VectorStoreIndex from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-3.5-turbo") vector_index_metadata = VectorStoreIndex(all_nodes) vector_retriever_metadata = vector_index_metadata.as_retriever( similarity_top_k=2 ) retriever_metadata = RecursiveRetriever( "vector", retriever_dict={"vector": vector_retriever_metadata}, node_dict=all_nodes_dict, verbose=True, ) nodes = retriever_metadata.retrieve( "Can you tell me about the key concepts for safety finetuning" ) for node in nodes: display_source_node(node, source_length=2000) query_engine_metadata = RetrieverQueryEngine.from_args( retriever_metadata, llm=llm ) response = query_engine_metadata.query( "Can you tell me about the key concepts for safety finetuning" ) print(str(response)) from llama_index.core.evaluation import ( generate_question_context_pairs, EmbeddingQAFinetuneDataset, ) import nest_asyncio nest_asyncio.apply() eval_dataset =
generate_question_context_pairs(base_nodes)
llama_index.core.evaluation.generate_question_context_pairs
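generate_question_context_pairs returns an EmbeddingQAFinetuneDataset mapping synthetic questions to the node IDs that should be retrieved. A hedged sketch of how such a dataset is typically consumed, scoring the retriever_metadata built above (the metric choice is an assumption, not from the source):

from llama_index.core.evaluation import RetrieverEvaluator

retriever_evaluator = RetrieverEvaluator.from_metric_names(
    ["hit_rate", "mrr"], retriever=retriever_metadata
)
# nest_asyncio is applied above, so top-level await works in a notebook.
eval_results = await retriever_evaluator.aevaluate_dataset(eval_dataset)
avg_hit_rate = sum(r.metric_vals_dict["hit_rate"] for r in eval_results) / len(
    eval_results
)
print(avg_hit_rate)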
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-cohere') get_ipython().run_line_magic('pip', 'install llama-index-llms-gemini') get_ipython().system('pip install "google-generativeai" -q') import nest_asyncio nest_asyncio.apply() from llama_index.core.llama_dataset import download_llama_dataset pairwise_evaluator_dataset, _ = download_llama_dataset( "MtBenchHumanJudgementDataset", "./mt_bench_data" ) pairwise_evaluator_dataset.to_pandas()[:5] from llama_index.core.evaluation import PairwiseComparisonEvaluator from llama_index.llms.openai import OpenAI from llama_index.llms.gemini import Gemini from llama_index.llms.cohere import Cohere llm_gpt4 = OpenAI(temperature=0, model="gpt-4") llm_gpt35 = OpenAI(temperature=0, model="gpt-3.5-turbo") llm_gemini = Gemini(model="models/gemini-pro", temperature=0) evaluators = { "gpt-4": PairwiseComparisonEvaluator(llm=llm_gpt4), "gpt-3.5":
PairwiseComparisonEvaluator(llm=llm_gpt35)
llama_index.core.evaluation.PairwiseComparisonEvaluator
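For orientation, a minimal sketch of invoking one of the pairwise evaluators above on a hand-written pair of candidate answers (the query and both answers are made up):

pairwise_result = await evaluators["gpt-4"].aevaluate(
    query="At what temperature does water freeze?",
    response="Water freezes at 0 degrees Celsius.",
    second_response="Water freezes at 100 degrees Celsius.",
)
# score: 1.0 if the first response wins, 0.0 if the second wins, 0.5 for a tie
print(pairwise_result.score, pairwise_result.feedback)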
get_ipython().run_line_magic('pip', 'install llama-index-readers-file') get_ipython().run_line_magic('pip', 'install llama-index-agent-openai') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('pip install llama-index') import nest_asyncio nest_asyncio.apply() get_ipython().system("mkdir -p 'data/'") get_ipython().system('wget --user-agent "Mozilla" "https://arxiv.org/pdf/2307.09288.pdf" -O "data/llama2.pdf"') from pathlib import Path from llama_index.core import Document, VectorStoreIndex from llama_index.readers.file import PyMuPDFReader from llama_index.core.node_parser import SimpleNodeParser from llama_index.llms.openai import OpenAI loader = PyMuPDFReader() docs0 = loader.load(file_path=Path("./data/llama2.pdf")) doc_text = "\n\n".join([d.get_content() for d in docs0]) docs = [Document(text=doc_text)] node_parser = SimpleNodeParser.from_defaults() nodes = node_parser.get_nodes_from_documents(docs) len(nodes) get_ipython().system('wget "https://www.dropbox.com/scl/fi/fh9vsmmm8vu0j50l3ss38/llama2_eval_qr_dataset.json?rlkey=kkoaez7aqeb4z25gzc06ak6kb&dl=1" -O data/llama2_eval_qr_dataset.json') from llama_index.core.evaluation import QueryResponseDataset eval_dataset = QueryResponseDataset.from_json( "data/llama2_eval_qr_dataset.json" ) from llama_index.core.evaluation import DatasetGenerator, QueryResponseDataset from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-4-1106-preview") dataset_generator = DatasetGenerator( nodes[:20], llm=llm, show_progress=True, num_questions_per_chunk=3, ) eval_dataset = await dataset_generator.agenerate_dataset_from_nodes(num=60) eval_dataset.save_json("data/llama2_eval_qr_dataset.json") eval_dataset = QueryResponseDataset.from_json( "data/llama2_eval_qr_dataset.json" ) from llama_index.core.evaluation.eval_utils import ( get_responses, get_results_df, ) from llama_index.core.evaluation import ( CorrectnessEvaluator, SemanticSimilarityEvaluator, BatchEvalRunner, ) from llama_index.llms.openai import OpenAI eval_llm = OpenAI(model="gpt-4-1106-preview") evaluator_c = CorrectnessEvaluator(llm=eval_llm) evaluator_s = SemanticSimilarityEvaluator() evaluator_dict = { "correctness": evaluator_c, "semantic_similarity": evaluator_s, } batch_runner =
BatchEvalRunner(evaluator_dict, workers=2, show_progress=True)
llama_index.core.evaluation.BatchEvalRunner
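To close the loop on the BatchEvalRunner completion, a hedged sketch of running it end to end, assuming a plain vector index over the nodes parsed above as the system under evaluation:

from llama_index.core import VectorStoreIndex

index = VectorStoreIndex(nodes)
query_engine = index.as_query_engine(similarity_top_k=2)

# Predict an answer for every eval question, then score all of them in batch.
pred_responses = get_responses(
    eval_dataset.questions, query_engine, show_progress=True
)
eval_results = await batch_runner.aevaluate_responses(
    eval_dataset.questions,
    responses=pred_responses,
    reference=[r for (_, r) in eval_dataset.qr_pairs],
)
results_df = get_results_df(
    [eval_results], ["base"], ["correctness", "semantic_similarity"]
)
print(results_df)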