Dataset columns (recovered from the dataset-viewer header):

    code         string    161 – 67.2k characters   (raw Python source)
    apis         sequence  1 – 24 items             (llama_index APIs used)
    extract_api  string    164 – 53.3k characters   (serialized API-call extraction data)
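For orientation, a minimal sketch (not part of the original dump) of how rows with this schema could be loaded and inspected with the Hugging Face datasets library; the dataset path below is a hypothetical placeholder, since the dump does not name the dataset.

from datasets import load_dataset

# "org/llama-index-api-usage" is a made-up placeholder path, not the real dataset id.
rows = load_dataset("org/llama-index-api-usage", split="train")

for row in rows.select(range(3)):
    # Each row carries the raw source, the list of llama_index APIs it uses,
    # and the serialized extraction metadata.
    print(len(row["code"]), row["apis"], len(row["extract_api"]))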
from llama_index import Document
import json, os
from llama_index.node_parser import SimpleNodeParser
from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex
from langchain import OpenAI
from llama_index.composability import ComposableGraph
from llama_index.data_structs.node_v2 import Node, DocumentRelationship


class ConfigLLM:
    # define LLM
    name = "gpt-3.5-turbo"
    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo"))

    # define prompt helper
    # set maximum input size
    max_input_size = 2096
    # set number of output tokens
    num_output = 256
    # set maximum chunk overlap
    max_chunk_overlap = 20
    prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)


def index_construct_and_save(timechunk_path: str, save_loc: str):
    for filename in os.listdir(timechunk_path):
        file = os.path.join(timechunk_path, filename)
        data = json.load(open(file=file, mode="r"))
        # keys, text = list(zip(*data.items()))
        nodes = [Node(text=text, doc_id=keys) for keys, text in data.items()]
        index = GPTTreeIndex(nodes=nodes)
        index.save_to_disk(f"{save_loc}/{filename}.json")


def load_index_with_summary(index_loc: str):
    index_list = []
    index_summary_list = []
    for filename in os.listdir(index_loc):
        index_file = os.path.join(index_loc, filename)
        index = GPTTreeIndex.load_from_disk(index_file)
        summary = index.query(
            "What is the summary of this document chunk?", mode="summarize"
        )
        index_summary_list.append(str(summary))
        index_list.append(index)
    #! logging
    print("index list", len(index_list), index_list)
    return index_list, index_summary_list


def compose_graph_and_save(index_loc: str, save_loc: str):
    index_list, index_summary_list = load_index_with_summary(index_loc)
    #! logging
    print(index_summary_list)
    graph = ComposableGraph.from_indices(GPTListIndex, index_list, index_summary_list)
    graph.save_to_disk(save_loc)


def load_graph(graph_location: str):
    return ComposableGraph.load_from_disk(graph_location)


def query_graph(query: str, graph: ComposableGraph):
    response = graph.query(query, query_configs=get_query_configs())
    return response


def parse_response(response: ComposableGraph.query):
    print("-" * 50)
    print(response)
    print("-" * 50)
    print(
        str(response),
        # response.source_nodes,
        [node_with_score.node.doc_id for node_with_score in response.source_nodes],
        # [node.ref_doc_id for node in response.source_nodes],
        response.get_formatted_sources(),
        sep="\n" + "+" * 80 + "\n",
    )
    print("-" * 50)


def query_composed_index(query: str, graph_loc: str):
    graph = load_graph(graph_loc)
    response = query_graph(query, graph)
    parse_response(response)


def query_single_index(query: str, index_loc: str):
    index = GPTTreeIndex.load_from_disk(index_loc)
    response = index.query(query)
    parse_response(response)


def get_query_configs():
    # set query config
    query_configs = [
        {
            "index_struct_type": "simple_dict",
            "query_mode": "default",
            "query_kwargs": {"similarity_top_k": 1},
        },
        {
            "index_struct_type": "keyword_table",
            "query_mode": "simple",
            "query_kwargs": {},
        },
    ]
    return query_configs
[ "llama_index.data_structs.node_v2.Node", "llama_index.GPTTreeIndex.load_from_disk", "llama_index.composability.ComposableGraph.from_indices", "llama_index.GPTTreeIndex", "llama_index.PromptHelper", "llama_index.composability.ComposableGraph.load_from_disk" ]
[((705, 764), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (717, 764), False, 'from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex\n'), ((853, 879), 'os.listdir', 'os.listdir', (['timechunk_path'], {}), '(timechunk_path)\n', (863, 879), False, 'import json, os\n'), ((1328, 1349), 'os.listdir', 'os.listdir', (['index_loc'], {}), '(index_loc)\n', (1338, 1349), False, 'import json, os\n'), ((1966, 2040), 'llama_index.composability.ComposableGraph.from_indices', 'ComposableGraph.from_indices', (['GPTListIndex', 'index_list', 'index_summary_list'], {}), '(GPTListIndex, index_list, index_summary_list)\n', (1994, 2040), False, 'from llama_index.composability import ComposableGraph\n'), ((2124, 2170), 'llama_index.composability.ComposableGraph.load_from_disk', 'ComposableGraph.load_from_disk', (['graph_location'], {}), '(graph_location)\n', (2154, 2170), False, 'from llama_index.composability import ComposableGraph\n'), ((2974, 3012), 'llama_index.GPTTreeIndex.load_from_disk', 'GPTTreeIndex.load_from_disk', (['index_loc'], {}), '(index_loc)\n', (3001, 3012), False, 'from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex\n'), ((896, 934), 'os.path.join', 'os.path.join', (['timechunk_path', 'filename'], {}), '(timechunk_path, filename)\n', (908, 934), False, 'import json, os\n'), ((1129, 1154), 'llama_index.GPTTreeIndex', 'GPTTreeIndex', ([], {'nodes': 'nodes'}), '(nodes=nodes)\n', (1141, 1154), False, 'from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex\n'), ((1372, 1405), 'os.path.join', 'os.path.join', (['index_loc', 'filename'], {}), '(index_loc, filename)\n', (1384, 1405), False, 'import json, os\n'), ((1422, 1461), 'llama_index.GPTTreeIndex.load_from_disk', 'GPTTreeIndex.load_from_disk', (['index_file'], {}), '(index_file)\n', (1449, 1461), False, 'from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex\n'), ((437, 486), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (443, 486), False, 'from langchain import OpenAI\n'), ((1052, 1080), 'llama_index.data_structs.node_v2.Node', 'Node', ([], {'text': 'text', 'doc_id': 'keys'}), '(text=text, doc_id=keys)\n', (1056, 1080), False, 'from llama_index.data_structs.node_v2 import Node, DocumentRelationship\n')]
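Each extract_api cell appears to hold the repr of a Python list with one tuple per call: (character span of the call, dotted API path, call expression as written, (positional args, keyword args), argument source text, argument span, a boolean flag whose meaning is not documented in this dump, and the originating import line). A minimal sketch of decoding one entry with the standard library, assuming that layout holds; the sample string is copied from the first tuple shown above.

import ast

# First tuple of the row above, copied verbatim (the \n escapes are part of the stored repr).
raw = ("[((705, 764), 'llama_index.PromptHelper', 'PromptHelper', "
       "(['max_input_size', 'num_output', 'max_chunk_overlap'], {}), "
       "'(max_input_size, num_output, max_chunk_overlap)\\n', (717, 764), False, "
       "'from llama_index import GPTTreeIndex, LLMPredictor, PromptHelper, GPTListIndex\\n')]")

for span, api_path, call_expr, (args, kwargs), arg_text, arg_span, flag, import_line in ast.literal_eval(raw):
    # `span` indexes into the row's `code` string; the semantics of `flag` are not given here.
    print(api_path, span, args, kwargs, import_line.strip())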
# This file has been modified by the Nextpy Team in 2023 using AI tools and automation scripts.
# We have rigorously tested these modifications to ensure reliability and performance. Based on successful test results, we are confident in the quality and stability of these changes.

"""Base reader class."""
from abc import abstractmethod
from typing import Any, List

from nextpy.ai.schema import Document


class BaseReader:
    """Utilities for loading data from a directory."""

    @abstractmethod
    def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]:
        """Load data from the input directory."""


"""Slack reader."""
import logging
import os
import time
from datetime import datetime
from ssl import SSLContext
from typing import List, Optional

from llama_index.readers.base import BaseReader
from llama_index.schema import Document

logger = logging.getLogger(__name__)


class SlackReader(BaseReader):
    """Slack reader.

    Reads conversations from channels. If an earliest_date is provided, an
    optional latest_date can also be provided. If no latest_date is provided,
    we assume the latest date is the current timestamp.

    Args:
        slack_token (Optional[str]): Slack token. If not provided, we
            assume the environment variable `SLACK_BOT_TOKEN` is set.
        ssl (Optional[str]): Custom SSL context. If not provided, it is assumed
            there is already an SSL context available.
        earliest_date (Optional[datetime]): Earliest date from which
            to read conversations. If not provided, we read all messages.
        latest_date (Optional[datetime]): Latest date from which to read
            conversations. If not provided, defaults to current timestamp
            in combination with earliest_date.
    """

    def __init__(
        self,
        slack_token: Optional[str] = None,
        ssl: Optional[SSLContext] = None,
        earliest_date: Optional[datetime] = None,
        latest_date: Optional[datetime] = None,
    ) -> None:
        """Initialize with parameters."""
        from slack_sdk import WebClient

        if slack_token is None:
            slack_token = os.environ["SLACK_BOT_TOKEN"]
        if slack_token is None:
            raise ValueError(
                "Must specify `slack_token` or set environment "
                "variable `SLACK_BOT_TOKEN`."
            )
        if ssl is None:
            self.client = WebClient(token=slack_token)
        else:
            self.client = WebClient(token=slack_token, ssl=ssl)
        if latest_date is not None and earliest_date is None:
            raise ValueError(
                "Must specify `earliest_date` if `latest_date` is specified."
            )
        if earliest_date is not None:
            self.earliest_date_timestamp: Optional[float] = earliest_date.timestamp()
        else:
            self.earliest_date_timestamp = None
        if latest_date is not None:
            self.latest_date_timestamp = latest_date.timestamp()
        else:
            self.latest_date_timestamp = datetime.now().timestamp()
        res = self.client.api_test()
        if not res["ok"]:
            raise ValueError(f"Error initializing Slack API: {res['error']}")

    def _read_message(self, channel_id: str, message_ts: str) -> str:
        from slack_sdk.errors import SlackApiError

        """Read a message."""
        messages_text: List[str] = []
        next_cursor = None
        while True:
            try:
                # https://slack.com/api/conversations.replies
                # List all replies to a message, including the message itself.
                if self.earliest_date_timestamp is None:
                    result = self.client.conversations_replies(
                        channel=channel_id, ts=message_ts, cursor=next_cursor
                    )
                else:
                    conversations_replies_kwargs = {
                        "channel": channel_id,
                        "ts": message_ts,
                        "cursor": next_cursor,
                        "latest": str(self.latest_date_timestamp),
                    }
                    if self.earliest_date_timestamp is not None:
                        conversations_replies_kwargs["oldest"] = str(
                            self.earliest_date_timestamp
                        )
                    result = self.client.conversations_replies(
                        **conversations_replies_kwargs  # type: ignore
                    )
                messages = result["messages"]
                messages_text.extend(message["text"] for message in messages)
                if not result["has_more"]:
                    break
                next_cursor = result["response_metadata"]["next_cursor"]
            except SlackApiError as e:
                if e.response["error"] == "ratelimited":
                    logger.error(
                        "Rate limit error reached, sleeping for: {} seconds".format(
                            e.response.headers["retry-after"]
                        )
                    )
                    time.sleep(int(e.response.headers["retry-after"]))
                else:
                    logger.error("Error parsing conversation replies: {}".format(e))

        return "\n\n".join(messages_text)

    def _read_channel(self, channel_id: str, reverse_chronological: bool) -> str:
        from slack_sdk.errors import SlackApiError

        """Read a channel."""
        result_messages: List[str] = []
        next_cursor = None
        while True:
            try:
                # Call the conversations.history method using the WebClient
                # conversations.history returns the first 100 messages by default
                # These results are paginated,
                # see: https://api.slack.com/methods/conversations.history$pagination
                conversations_history_kwargs = {
                    "channel": channel_id,
                    "cursor": next_cursor,
                    "latest": str(self.latest_date_timestamp),
                }
                if self.earliest_date_timestamp is not None:
                    conversations_history_kwargs["oldest"] = str(
                        self.earliest_date_timestamp
                    )
                result = self.client.conversations_history(
                    **conversations_history_kwargs  # type: ignore
                )
                conversation_history = result["messages"]
                # Print results
                logger.info(
                    "{} messages found in {}".format(
                        len(conversation_history), channel_id
                    )
                )
                result_messages.extend(
                    self._read_message(channel_id, message["ts"])
                    for message in conversation_history
                )
                if not result["has_more"]:
                    break
                next_cursor = result["response_metadata"]["next_cursor"]
            except SlackApiError as e:
                if e.response["error"] == "ratelimited":
                    logger.error(
                        "Rate limit error reached, sleeping for: {} seconds".format(
                            e.response.headers["retry-after"]
                        )
                    )
                    time.sleep(int(e.response.headers["retry-after"]))
                else:
                    logger.error("Error parsing conversation replies: {}".format(e))

        return (
            "\n\n".join(result_messages)
            if reverse_chronological
            else "\n\n".join(result_messages[::-1])
        )

    def load_data(
        self, channel_ids: List[str], reverse_chronological: bool = True
    ) -> List[Document]:
        """Load data from the input directory.

        Args:
            channel_ids (List[str]): List of channel ids to read.

        Returns:
            List[Document]: List of documents.
        """
        results = []
        for channel_id in channel_ids:
            channel_content = self._read_channel(
                channel_id, reverse_chronological=reverse_chronological
            )
            results.append(
                Document(text=channel_content, metadata={"channel": channel_id})
            )
        return results


if __name__ == "__main__":
    reader = SlackReader()
    logger.info(reader.load_data(channel_ids=["C04DC2VUY3F"]))
[ "llama_index.schema.Document" ]
[((877, 904), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (894, 904), False, 'import logging\n'), ((2439, 2467), 'slack_sdk.WebClient', 'WebClient', ([], {'token': 'slack_token'}), '(token=slack_token)\n', (2448, 2467), False, 'from slack_sdk import WebClient\n'), ((2508, 2545), 'slack_sdk.WebClient', 'WebClient', ([], {'token': 'slack_token', 'ssl': 'ssl'}), '(token=slack_token, ssl=ssl)\n', (2517, 2545), False, 'from slack_sdk import WebClient\n'), ((8292, 8356), 'llama_index.schema.Document', 'Document', ([], {'text': 'channel_content', 'metadata': "{'channel': channel_id}"}), "(text=channel_content, metadata={'channel': channel_id})\n", (8300, 8356), False, 'from llama_index.schema import Document\n'), ((3072, 3086), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3084, 3086), False, 'from datetime import datetime\n')]
""" This is the documentaion of the Llama2-7B-chat model from hugging face models This model has 7 billion parameters develped by Meta This is used for QnA purposes on local machine for testing... Model hardware config: - GPU: Nvidia RTX 40 Series (12GB) --> CUDA support - RAM: 32GB - i7 processor 13th gen """ import torch from transformers import BitsAndBytesConfig from langchain.embeddings.huggingface import HuggingFaceInstructEmbeddings from llama_index.llms import HuggingFaceLLM from llama_index import ServiceContext, SimpleDirectoryReader, \ VectorStoreIndex, get_response_synthesizer, set_global_service_context from llama_index.retrievers import VectorIndexRetriever from llama_index.query_engine import RetrieverQueryEngine from llama_index.prompts import PromptTemplate from llama_index.storage.storage_context import StorageContext from llama_index.vector_stores import ChromaVectorStore from llama_index.postprocessor import SimilarityPostprocessor from chromadb import PersistentClient from chromadb.utils import embedding_functions from dotenv import load_dotenv from transformers import AutoTokenizer import os load_dotenv() HF_TOKEN = os.getenv("HF_TOKEN") LLM = "meta-llama/Llama-2-7b-chat-hf" EMBED_MODEL = "hkunlp/instructor-large" DEVICE_MAP = "auto" DEVICE = "cuda" class Llama2_7B_Chat: """Class for Llama-7B Chat model from HuggingFace""" def __init__(self) -> None: """Constrcutor of the class Llama2_7B_Chat""" print("==================== starting constructor... ======================") # Start chroma client self.__chroma_client = PersistentClient('./chroma_db') # for model bit quantization for more effiency in computation by the LLM self.__quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_quant_type="nf4", bnb_4bit_use_double_quant=True, llm_int8_enable_fp32_cpu_offload=True ) tokenizer = AutoTokenizer.from_pretrained(LLM) # HuggingFaceLLM object - uses pretrained models from HuggingFace (Llama2-7B-chat model) self.__llm = HuggingFaceLLM( model_name=LLM, tokenizer=tokenizer, is_chat_model=True, max_new_tokens=512, query_wrapper_prompt=PromptTemplate( "<s> [INST] {query_str} [/INST]"), context_window=4000, model_kwargs={ "quantization_config": self.__quantization_config, "token": HF_TOKEN }, tokenizer_kwargs={ "token": HF_TOKEN }, device_map=DEVICE_MAP ) # embedding model - pretrained embedding model (it is wrapper around sentence_transformers) self.__embed_model = HuggingFaceInstructEmbeddings( model_name=EMBED_MODEL, model_kwargs={ "device": DEVICE } ) self.__index = None # Service context self.__service_context = ServiceContext.from_defaults( llm=self.__llm, embed_model=self.__embed_model) set_global_service_context(self.__service_context) def create_index(self, data_dir: str) -> None: """Creates the Vector Index for querying with LLM""" print("============= creating index.... 
================") # embedding function for chromadb embedding_func = embedding_functions.HuggingFaceEmbeddingFunction( api_key=HF_TOKEN, model_name=EMBED_MODEL ) # Load the documents from data_dir docs = SimpleDirectoryReader(data_dir).load_data() # Creating collection in chroma database chroma_collection = self.__chroma_client.get_or_create_collection("data_embeddings", embedding_function=embedding_func) # Creating Chroma Vector Store vector_store = ChromaVectorStore(chroma_collection=chroma_collection) # Create storage context using chroma vector store storage_context = StorageContext.from_defaults( vector_store=vector_store) self.__index = VectorStoreIndex.from_documents(docs, storage_context=storage_context) def start_query_engine(self): """Initialize the query engine""" print("=========== starting query engine... ===============") # configure retriever retriever = VectorIndexRetriever( index=self.__index, similarity_top_k=6 ) # configure node postproceesors s_processor = SimilarityPostprocessor(similarity_cutoff=0.65) # configure response synthesizer response_synthesizer = get_response_synthesizer() query_engine = RetrieverQueryEngine( retriever=retriever, node_postprocessors=[s_processor], response_synthesizer=response_synthesizer ) return query_engine def ask_llm(self, user_query: str, query_engine): """ Ask LLM for querying data based on context returns: (RESPONSE_TYPE, List[NodeWithScore]) """ # print("User asking -->", user_query) response = query_engine.query(user_query) return response, response.source_nodes def reset_model(): """resets the model's knowledge base""" os.system("rm -rf Data_*") os.system("rm -rf vector_store_data/") os.system("rm -rf chroma_db/")
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.get_response_synthesizer", "llama_index.postprocessor.SimilarityPostprocessor", "llama_index.query_engine.RetrieverQueryEngine", "llama_index.ServiceContext.from_defaults", "llama_index.vector_stores.ChromaVectorStore", "llama_index.prompts.PromptTemplate", "llama_index.storage.storage_context.StorageContext.from_defaults", "llama_index.SimpleDirectoryReader", "llama_index.retrievers.VectorIndexRetriever", "llama_index.set_global_service_context" ]
[((1191, 1204), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1202, 1204), False, 'from dotenv import load_dotenv\n'), ((1216, 1237), 'os.getenv', 'os.getenv', (['"""HF_TOKEN"""'], {}), "('HF_TOKEN')\n", (1225, 1237), False, 'import os\n'), ((5540, 5566), 'os.system', 'os.system', (['"""rm -rf Data_*"""'], {}), "('rm -rf Data_*')\n", (5549, 5566), False, 'import os\n'), ((5571, 5609), 'os.system', 'os.system', (['"""rm -rf vector_store_data/"""'], {}), "('rm -rf vector_store_data/')\n", (5580, 5609), False, 'import os\n'), ((5614, 5644), 'os.system', 'os.system', (['"""rm -rf chroma_db/"""'], {}), "('rm -rf chroma_db/')\n", (5623, 5644), False, 'import os\n'), ((1669, 1700), 'chromadb.PersistentClient', 'PersistentClient', (['"""./chroma_db"""'], {}), "('./chroma_db')\n", (1685, 1700), False, 'from chromadb import PersistentClient\n'), ((1820, 2002), 'transformers.BitsAndBytesConfig', 'BitsAndBytesConfig', ([], {'load_in_4bit': '(True)', 'bnb_4bit_compute_dtype': 'torch.bfloat16', 'bnb_4bit_quant_type': '"""nf4"""', 'bnb_4bit_use_double_quant': '(True)', 'llm_int8_enable_fp32_cpu_offload': '(True)'}), "(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16,\n bnb_4bit_quant_type='nf4', bnb_4bit_use_double_quant=True,\n llm_int8_enable_fp32_cpu_offload=True)\n", (1838, 2002), False, 'from transformers import BitsAndBytesConfig\n'), ((2094, 2128), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['LLM'], {}), '(LLM)\n', (2123, 2128), False, 'from transformers import AutoTokenizer\n'), ((2919, 3010), 'langchain.embeddings.huggingface.HuggingFaceInstructEmbeddings', 'HuggingFaceInstructEmbeddings', ([], {'model_name': 'EMBED_MODEL', 'model_kwargs': "{'device': DEVICE}"}), "(model_name=EMBED_MODEL, model_kwargs={\n 'device': DEVICE})\n", (2948, 3010), False, 'from langchain.embeddings.huggingface import HuggingFaceInstructEmbeddings\n'), ((3159, 3235), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'self.__llm', 'embed_model': 'self.__embed_model'}), '(llm=self.__llm, embed_model=self.__embed_model)\n', (3187, 3235), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, VectorStoreIndex, get_response_synthesizer, set_global_service_context\n'), ((3258, 3308), 'llama_index.set_global_service_context', 'set_global_service_context', (['self.__service_context'], {}), '(self.__service_context)\n', (3284, 3308), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, VectorStoreIndex, get_response_synthesizer, set_global_service_context\n'), ((3558, 3652), 'chromadb.utils.embedding_functions.HuggingFaceEmbeddingFunction', 'embedding_functions.HuggingFaceEmbeddingFunction', ([], {'api_key': 'HF_TOKEN', 'model_name': 'EMBED_MODEL'}), '(api_key=HF_TOKEN,\n model_name=EMBED_MODEL)\n', (3606, 3652), False, 'from chromadb.utils import embedding_functions\n'), ((4101, 4155), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (4118, 4155), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((4242, 4297), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (4270, 4297), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((4335, 4405), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', 
(['docs'], {'storage_context': 'storage_context'}), '(docs, storage_context=storage_context)\n', (4366, 4405), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, VectorStoreIndex, get_response_synthesizer, set_global_service_context\n'), ((4605, 4665), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'self.__index', 'similarity_top_k': '(6)'}), '(index=self.__index, similarity_top_k=6)\n', (4625, 4665), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((4763, 4810), 'llama_index.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': '(0.65)'}), '(similarity_cutoff=0.65)\n', (4786, 4810), False, 'from llama_index.postprocessor import SimilarityPostprocessor\n'), ((4884, 4910), 'llama_index.get_response_synthesizer', 'get_response_synthesizer', ([], {}), '()\n', (4908, 4910), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, VectorStoreIndex, get_response_synthesizer, set_global_service_context\n'), ((4935, 5058), 'llama_index.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever', 'node_postprocessors': '[s_processor]', 'response_synthesizer': 'response_synthesizer'}), '(retriever=retriever, node_postprocessors=[s_processor],\n response_synthesizer=response_synthesizer)\n', (4955, 5058), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((2422, 2470), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', (['"""<s> [INST] {query_str} [/INST]"""'], {}), "('<s> [INST] {query_str} [/INST]')\n", (2436, 2470), False, 'from llama_index.prompts import PromptTemplate\n'), ((3742, 3773), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['data_dir'], {}), '(data_dir)\n', (3763, 3773), False, 'from llama_index import ServiceContext, SimpleDirectoryReader, VectorStoreIndex, get_response_synthesizer, set_global_service_context\n')]
import sys

sys.stdout.reconfigure(encoding="utf-8")
sys.stdin.reconfigure(encoding="utf-8")

import streamlit as st
import streamlit.components.v1 as components
import re
import random

CODE_BUILD_KG = """
# Prepare for GraphStore
os.environ['NEBULA_USER'] = "root"
os.environ['NEBULA_PASSWORD'] = "nebula"  # default password
os.environ['NEBULA_ADDRESS'] = "127.0.0.1:9669"  # assumed we have NebulaGraph installed locally

space_name = "guardians"
edge_types, rel_prop_names = ["relationship"], ["relationship"]  # default, could be omit if create from an empty kg
tags = ["entity"]  # default, could be omit if create from an empty kg

graph_store = NebulaGraphStore(space_name=space_name,
                             edge_types=edge_types,
                             rel_prop_names=rel_prop_names,
                             tags=tags)
storage_context = StorageContext.from_defaults(graph_store=graph_store)

# Download and Preprocess Data
from llama_index import download_loader

WikipediaReader = download_loader("WikipediaReader")
loader = WikipediaReader()
documents = loader.load_data(pages=['Guardians of the Galaxy Vol. 3'], auto_suggest=False)

# Build Knowledge Graph
kg_index = KnowledgeGraphIndex.from_documents(
    documents,
    storage_context=storage_context,
    max_triplets_per_chunk=10,
    service_context=service_context,
    space_name=space_name,
    edge_types=edge_types,
    rel_prop_names=rel_prop_names,
    tags=tags,
    include_embeddings=True,
)
"""

CODE_NL2CYPHER_LANGCHAIN = """
## Langchain
# Doc: https://python.langchain.com/docs/modules/chains/additional/graph_nebula_qa

from langchain.chat_models import ChatOpenAI
from langchain.chains import NebulaGraphQAChain
from langchain.graphs import NebulaGraph

graph = NebulaGraph(
    space=space_name,
    username="root",
    password="nebula",
    address="127.0.0.1",
    port=9669,
    session_pool_size=30,
)

chain = NebulaGraphQAChain.from_llm(
    llm, graph=graph, verbose=True
)

chain.run(
    "Tell me about Peter Quill?",
)
"""

CODE_NL2CYPHER_LLAMAINDEX = """
## Llama Index
# Doc: https://gpt-index.readthedocs.io/en/latest/examples/query_engine/knowledge_graph_query_engine.html

from llama_index.query_engine import KnowledgeGraphQueryEngine
from llama_index.storage.storage_context import StorageContext
from llama_index.graph_stores import NebulaGraphStore

nl2kg_query_engine = KnowledgeGraphQueryEngine(
    storage_context=storage_context,
    service_context=service_context,
    llm=llm,
    verbose=True,
)

response = nl2kg_query_engine.query(
    "Tell me about Peter Quill?",
)
"""

import os
import json
import openai
from llama_index.llms import AzureOpenAI
from langchain.embeddings import OpenAIEmbeddings
from llama_index import LangchainEmbedding
from llama_index import (
    VectorStoreIndex,
    SimpleDirectoryReader,
    KnowledgeGraphIndex,
    LLMPredictor,
    ServiceContext,
)
from llama_index.storage.storage_context import StorageContext
from llama_index.graph_stores import NebulaGraphStore

import logging
import sys

logging.basicConfig(
    stream=sys.stdout, level=logging.INFO
)  # logging.DEBUG for more verbose output
# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))

openai.api_type = "azure"
openai.api_base = st.secrets["OPENAI_API_BASE"]
# openai.api_version = "2022-12-01" azure gpt-3
openai.api_version = "2023-05-15"  # azure gpt-3.5 turbo
openai.api_key = st.secrets["OPENAI_API_KEY"]

llm = AzureOpenAI(
    engine=st.secrets["DEPLOYMENT_NAME"],
    temperature=0,
    model="gpt-35-turbo",
)
llm_predictor = LLMPredictor(llm=llm)

# You need to deploy your own embedding model as well as your own chat completion model
embedding_llm = LangchainEmbedding(
    OpenAIEmbeddings(
        model="text-embedding-ada-002",
        deployment=st.secrets["EMBEDDING_DEPLOYMENT_NAME"],
        openai_api_key=openai.api_key,
        openai_api_base=openai.api_base,
        openai_api_type=openai.api_type,
        openai_api_version=openai.api_version,
    ),
    embed_batch_size=1,
)

service_context = ServiceContext.from_defaults(
    llm_predictor=llm_predictor,
    embed_model=embedding_llm,
)

os.environ["NEBULA_USER"] = st.secrets["graphd_user"]
os.environ["NEBULA_PASSWORD"] = st.secrets["graphd_password"]
os.environ[
    "NEBULA_ADDRESS"
] = f"{st.secrets['graphd_host']}:{st.secrets['graphd_port']}"

space_name = "guardians"
edge_types, rel_prop_names = ["relationship"], [
    "relationship"
]  # default, could be omit if create from an empty kg
tags = ["entity"]  # default, could be omit if create from an empty kg

graph_store = NebulaGraphStore(
    space_name=space_name,
    edge_types=edge_types,
    rel_prop_names=rel_prop_names,
    tags=tags,
)
storage_context = StorageContext.from_defaults(graph_store=graph_store)

from llama_index.query_engine import KnowledgeGraphQueryEngine
from llama_index.storage.storage_context import StorageContext
from llama_index.graph_stores import NebulaGraphStore

nl2kg_query_engine = KnowledgeGraphQueryEngine(
    storage_context=storage_context,
    service_context=service_context,
    llm=llm,
    verbose=True,
)


def cypher_to_all_paths(query):
    # Find the MATCH and RETURN parts
    match_parts = re.findall(r"(MATCH .+?(?=MATCH|$))", query, re.I | re.S)
    return_part = re.search(r"RETURN .+", query).group()

    modified_matches = []
    path_ids = []

    # Go through each MATCH part
    for i, part in enumerate(match_parts):
        path_id = f"path_{i}"
        path_ids.append(path_id)

        # Replace the MATCH keyword with "MATCH path_i = "
        modified_part = part.replace("MATCH ", f"MATCH {path_id} = ")
        modified_matches.append(modified_part)

    # Join the modified MATCH parts
    matches_string = " ".join(modified_matches)

    # Construct the new RETURN part
    return_string = f"RETURN {', '.join(path_ids)};"

    # Remove the old RETURN part from matches_string
    matches_string = matches_string.replace(return_part, "")

    # Combine everything
    modified_query = f"{matches_string}\n{return_string}"

    return modified_query


# write string to file
def result_to_df(result):
    from typing import Dict

    import pandas as pd

    columns = result.keys()
    d: Dict[str, list] = {}
    for col_num in range(result.col_size()):
        col_name = columns[col_num]
        col_list = result.column_values(col_name)
        d[col_name] = [x.cast() for x in col_list]
    return pd.DataFrame(d)


def render_pd_item(g, item):
    from nebula3.data.DataObject import Node, PathWrapper, Relationship

    if isinstance(item, Node):
        node_id = item.get_id().cast()
        tags = item.tags()  # list of strings
        props = dict()
        for tag in tags:
            props.update(item.properties(tag))
        g.add_node(node_id, label=node_id, title=str(props))
    elif isinstance(item, Relationship):
        src_id = item.start_vertex_id().cast()
        dst_id = item.end_vertex_id().cast()
        edge_name = item.edge_name()
        props = item.properties()
        # ensure start and end vertex exist in graph
        if not src_id in g.node_ids:
            g.add_node(src_id)
        if not dst_id in g.node_ids:
            g.add_node(dst_id)
        g.add_edge(src_id, dst_id, label=edge_name, title=str(props))
    elif isinstance(item, PathWrapper):
        for node in item.nodes():
            render_pd_item(g, node)
        for edge in item.relationships():
            render_pd_item(g, edge)
    elif isinstance(item, list):
        for it in item:
            render_pd_item(g, it)


def create_pyvis_graph(result_df):
    from pyvis.network import Network

    g = Network(
        notebook=True,
        directed=True,
        cdn_resources="in_line",
        height="500px",
        width="100%",
    )
    for _, row in result_df.iterrows():
        for item in row:
            render_pd_item(g, item)
    g.repulsion(
        node_distance=100,
        central_gravity=0.2,
        spring_length=200,
        spring_strength=0.05,
        damping=0.09,
    )
    return g


def query_nebulagraph(
    query,
    space_name=space_name,
    address=st.secrets["graphd_host"],
    port=9669,
    user=st.secrets["graphd_user"],
    password=st.secrets["graphd_password"],
):
    from nebula3.Config import SessionPoolConfig
    from nebula3.gclient.net.SessionPool import SessionPool

    config = SessionPoolConfig()
    session_pool = SessionPool(user, password, space_name, [(address, port)])
    session_pool.init(config)
    return session_pool.execute(query)


st.title("Demo: Knowledge Graph Build and Query with LLM")

(
    tab_code_kg,
    tab_notebook,
    tab_graph_view,
    tab_cypher,
    tab_nl2cypher,
    tab_code_nl2cypher,
) = st.tabs(
    [
        "Code: Build KG",
        "Full Notebook",
        "Graph View",
        "Query",
        "Natural Language to Cypher",
        "Code: NL2Cypher",
    ]
)

with tab_code_kg:
    st.write(
        "With a few lines of code, we can build a knowledge graph with LLM, LlamaIndex and NebulaGraph."
    )
    st.write(
        "See full notebook for more details and try Graph Visualizations, Query, and Natural Language to Cypher by clicking on the tabs on the right."
    )
    st.code(body=CODE_BUILD_KG, language="python")

with tab_notebook:
    st.write("> Full Notebook")
    st.markdown(
        """

This is the full notebook to demonstrate how to:

- Extract from data sources and build a knowledge graph with LLM and Llama Index, NebulaGraph in 3 lines of code
- Query the Knowledge Graph with nGQL and visualize the graph
- Query the knowledge graph with natural language in 1 line of code(both Langchain and Llama Index)
"""
    )
    # link to download notebook
    st.markdown(
        """
[Download](https://www.siwei.io/demo-dumps/kg-llm/KG_Building.ipynb) the notebook.
"""
    )
    components.iframe(
        src="https://www.siwei.io/demo-dumps/kg-llm/KG_Building.html",
        height=2000,
        width=800,
        scrolling=True,
    )

with tab_graph_view:
    st.write(
        "> Sub-Graph View of the Knowledge Graph about [Guardians of the Galaxy Vol. 3](https://en.wikipedia.org/wiki/Guardians_of_the_Galaxy_Vol._3)"
    )
    components.iframe(
        src="https://www.siwei.io/demo-dumps/kg-llm/nebulagraph_draw_sample.html",
        height=500,
        scrolling=True,
    )

with tab_cypher:
    st.write("> Query Knowledge Graph in nGQL")
    query_string = st.text_input(
        label="Enter nGQL query string", value="MATCH ()-[e]->() RETURN e LIMIT 25"
    )
    if st.button("> execute"):
        # run query
        result = query_nebulagraph(query_string)

        # convert to pandas dataframe
        result_df = result_to_df(result)

        # display pd dataframe
        st.dataframe(result_df)

        # create pyvis graph
        g = create_pyvis_graph(result_df)

        # render with random file name
        import random

        graph_html = g.generate_html(f"graph_{random.randint(0, 1000)}.html")

        components.html(graph_html, height=500, scrolling=True)

with tab_nl2cypher:
    st.write("> Natural Language to Cypher")
    nl_query_string = st.text_input(
        label="Enter natural language query string", value="Tell me about Peter Quill?"
    )
    if st.button("Ask KG"):
        response = nl2kg_query_engine.query(nl_query_string)
        graph_query = list(response.metadata.values())[0]["graph_store_query"]
        graph_query = graph_query.replace("WHERE", "\n WHERE").replace(
            "RETURN", "\nRETURN"
        )
        answer = str(response)
        st.write(f"*Answer*: {answer}")
        st.markdown(
            f"""
## Generated NebulaGraph Cypher Query
```cypher
{graph_query}
```
"""
        )
        st.write("## Rendered Graph")
        render_query = cypher_to_all_paths(graph_query)
        result = query_nebulagraph(render_query)
        result_df = result_to_df(result)

        # create pyvis graph
        g = create_pyvis_graph(result_df)

        # render with random file name
        graph_html = g.generate_html(f"graph_{random.randint(0, 1000)}.html")

        components.html(graph_html, height=500, scrolling=True)

with tab_code_nl2cypher:
    st.write(
        "> Natural Language to NebulaGraph Cypher Code with Langchain and Llama Index"
    )
    tab_langchain, tab_llamaindex = st.tabs(["Langchain", "Llama Index"])
    with tab_langchain:
        st.code(body=CODE_NL2CYPHER_LANGCHAIN, language="python")
    with tab_llamaindex:
        st.code(body=CODE_NL2CYPHER_LLAMAINDEX, language="python")

    st.markdown(
        """

## References

- [Langchain: NebulaGraphQAChain](https://python.langchain.com/docs/modules/chains/additional/graph_nebula_qa)
- [Llama Index: KnowledgeGraphQueryEngine](https://gpt-index.readthedocs.io/en/latest/examples/query_engine/knowledge_graph_query_engine.html)
"""
    )
[ "llama_index.llms.AzureOpenAI", "llama_index.LLMPredictor", "llama_index.query_engine.KnowledgeGraphQueryEngine", "llama_index.ServiceContext.from_defaults", "llama_index.storage.storage_context.StorageContext.from_defaults", "llama_index.graph_stores.NebulaGraphStore" ]
[((12, 52), 'sys.stdout.reconfigure', 'sys.stdout.reconfigure', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (34, 52), False, 'import sys\n'), ((53, 92), 'sys.stdin.reconfigure', 'sys.stdin.reconfigure', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (74, 92), False, 'import sys\n'), ((2986, 3044), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (3005, 3044), False, 'import logging\n'), ((3400, 3491), 'llama_index.llms.AzureOpenAI', 'AzureOpenAI', ([], {'engine': "st.secrets['DEPLOYMENT_NAME']", 'temperature': '(0)', 'model': '"""gpt-35-turbo"""'}), "(engine=st.secrets['DEPLOYMENT_NAME'], temperature=0, model=\n 'gpt-35-turbo')\n", (3411, 3491), False, 'from llama_index.llms import AzureOpenAI\n'), ((3518, 3539), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (3530, 3539), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, KnowledgeGraphIndex, LLMPredictor, ServiceContext\n'), ((4007, 4096), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embedding_llm'}), '(llm_predictor=llm_predictor, embed_model=\n embedding_llm)\n', (4035, 4096), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, KnowledgeGraphIndex, LLMPredictor, ServiceContext\n'), ((4550, 4658), 'llama_index.graph_stores.NebulaGraphStore', 'NebulaGraphStore', ([], {'space_name': 'space_name', 'edge_types': 'edge_types', 'rel_prop_names': 'rel_prop_names', 'tags': 'tags'}), '(space_name=space_name, edge_types=edge_types,\n rel_prop_names=rel_prop_names, tags=tags)\n', (4566, 4658), False, 'from llama_index.graph_stores import NebulaGraphStore\n'), ((4692, 4745), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'graph_store': 'graph_store'}), '(graph_store=graph_store)\n', (4720, 4745), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((4950, 5069), 'llama_index.query_engine.KnowledgeGraphQueryEngine', 'KnowledgeGraphQueryEngine', ([], {'storage_context': 'storage_context', 'service_context': 'service_context', 'llm': 'llm', 'verbose': '(True)'}), '(storage_context=storage_context, service_context=\n service_context, llm=llm, verbose=True)\n', (4975, 5069), False, 'from llama_index.query_engine import KnowledgeGraphQueryEngine\n'), ((8528, 8586), 'streamlit.title', 'st.title', (['"""Demo: Knowledge Graph Build and Query with LLM"""'], {}), "('Demo: Knowledge Graph Build and Query with LLM')\n", (8536, 8586), True, 'import streamlit as st\n'), ((8708, 8828), 'streamlit.tabs', 'st.tabs', (["['Code: Build KG', 'Full Notebook', 'Graph View', 'Query',\n 'Natural Language to Cypher', 'Code: NL2Cypher']"], {}), "(['Code: Build KG', 'Full Notebook', 'Graph View', 'Query',\n 'Natural Language to Cypher', 'Code: NL2Cypher'])\n", (8715, 8828), True, 'import streamlit as st\n'), ((3669, 3918), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': '"""text-embedding-ada-002"""', 'deployment': "st.secrets['EMBEDDING_DEPLOYMENT_NAME']", 'openai_api_key': 'openai.api_key', 'openai_api_base': 'openai.api_base', 'openai_api_type': 'openai.api_type', 'openai_api_version': 'openai.api_version'}), "(model='text-embedding-ada-002', deployment=st.secrets[\n 'EMBEDDING_DEPLOYMENT_NAME'], openai_api_key=openai.api_key,\n openai_api_base=openai.api_base, 
openai_api_type=openai.api_type,\n openai_api_version=openai.api_version)\n", (3685, 3918), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((5174, 5230), 're.findall', 're.findall', (['"""(MATCH .+?(?=MATCH|$))"""', 'query', '(re.I | re.S)'], {}), "('(MATCH .+?(?=MATCH|$))', query, re.I | re.S)\n", (5184, 5230), False, 'import re\n'), ((6406, 6421), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (6418, 6421), True, 'import pandas as pd\n'), ((7624, 7721), 'pyvis.network.Network', 'Network', ([], {'notebook': '(True)', 'directed': '(True)', 'cdn_resources': '"""in_line"""', 'height': '"""500px"""', 'width': '"""100%"""'}), "(notebook=True, directed=True, cdn_resources='in_line', height=\n '500px', width='100%')\n", (7631, 7721), False, 'from pyvis.network import Network\n'), ((8359, 8378), 'nebula3.Config.SessionPoolConfig', 'SessionPoolConfig', ([], {}), '()\n', (8376, 8378), False, 'from nebula3.Config import SessionPoolConfig\n'), ((8398, 8456), 'nebula3.gclient.net.SessionPool.SessionPool', 'SessionPool', (['user', 'password', 'space_name', '[(address, port)]'], {}), '(user, password, space_name, [(address, port)])\n', (8409, 8456), False, 'from nebula3.gclient.net.SessionPool import SessionPool\n'), ((8909, 9025), 'streamlit.write', 'st.write', (['"""With a few lines of code, we can build a knowledge graph with LLM, LlamaIndex and NebulaGraph."""'], {}), "(\n 'With a few lines of code, we can build a knowledge graph with LLM, LlamaIndex and NebulaGraph.'\n )\n", (8917, 9025), True, 'import streamlit as st\n'), ((9034, 9196), 'streamlit.write', 'st.write', (['"""See full notebook for more details and try Graph Visualizations, Query, and Natural Language to Cypher by clicking on the tabs on the right."""'], {}), "(\n 'See full notebook for more details and try Graph Visualizations, Query, and Natural Language to Cypher by clicking on the tabs on the right.'\n )\n", (9042, 9196), True, 'import streamlit as st\n'), ((9205, 9251), 'streamlit.code', 'st.code', ([], {'body': 'CODE_BUILD_KG', 'language': '"""python"""'}), "(body=CODE_BUILD_KG, language='python')\n", (9212, 9251), True, 'import streamlit as st\n'), ((9276, 9303), 'streamlit.write', 'st.write', (['"""> Full Notebook"""'], {}), "('> Full Notebook')\n", (9284, 9303), True, 'import streamlit as st\n'), ((9308, 9680), 'streamlit.markdown', 'st.markdown', (['"""\n\nThis is the full notebook to demonstrate how to:\n\n- Extract from data sources and build a knowledge graph with LLM and Llama Index, NebulaGraph in 3 lines of code\n- Query the Knowledge Graph with nGQL and visualize the graph\n- Query the knowledge graph with natural language in 1 line of code(both Langchain and Llama Index)\n """'], {}), '(\n """\n\nThis is the full notebook to demonstrate how to:\n\n- Extract from data sources and build a knowledge graph with LLM and Llama Index, NebulaGraph in 3 lines of code\n- Query the Knowledge Graph with nGQL and visualize the graph\n- Query the knowledge graph with natural language in 1 line of code(both Langchain and Llama Index)\n """\n )\n', (9319, 9680), True, 'import streamlit as st\n'), ((9721, 9834), 'streamlit.markdown', 'st.markdown', (['"""\n[Download](https://www.siwei.io/demo-dumps/kg-llm/KG_Building.ipynb) the notebook.\n"""'], {}), '(\n """\n[Download](https://www.siwei.io/demo-dumps/kg-llm/KG_Building.ipynb) the notebook.\n"""\n )\n', (9732, 9834), True, 'import streamlit as st\n'), ((9844, 9973), 'streamlit.components.v1.iframe', 'components.iframe', ([], {'src': 
'"""https://www.siwei.io/demo-dumps/kg-llm/KG_Building.html"""', 'height': '(2000)', 'width': '(800)', 'scrolling': '(True)'}), "(src=\n 'https://www.siwei.io/demo-dumps/kg-llm/KG_Building.html', height=2000,\n width=800, scrolling=True)\n", (9861, 9973), True, 'import streamlit.components.v1 as components\n'), ((10030, 10192), 'streamlit.write', 'st.write', (['"""> Sub-Graph View of the Knowledge Graph about [Guardians of the Galaxy Vol. 3](https://en.wikipedia.org/wiki/Guardians_of_the_Galaxy_Vol._3)"""'], {}), "(\n '> Sub-Graph View of the Knowledge Graph about [Guardians of the Galaxy Vol. 3](https://en.wikipedia.org/wiki/Guardians_of_the_Galaxy_Vol._3)'\n )\n", (10038, 10192), True, 'import streamlit as st\n'), ((10201, 10330), 'streamlit.components.v1.iframe', 'components.iframe', ([], {'src': '"""https://www.siwei.io/demo-dumps/kg-llm/nebulagraph_draw_sample.html"""', 'height': '(500)', 'scrolling': '(True)'}), "(src=\n 'https://www.siwei.io/demo-dumps/kg-llm/nebulagraph_draw_sample.html',\n height=500, scrolling=True)\n", (10218, 10330), True, 'import streamlit.components.v1 as components\n'), ((10375, 10418), 'streamlit.write', 'st.write', (['"""> Query Knowledge Graph in nGQL"""'], {}), "('> Query Knowledge Graph in nGQL')\n", (10383, 10418), True, 'import streamlit as st\n'), ((10438, 10533), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""Enter nGQL query string"""', 'value': '"""MATCH ()-[e]->() RETURN e LIMIT 25"""'}), "(label='Enter nGQL query string', value=\n 'MATCH ()-[e]->() RETURN e LIMIT 25')\n", (10451, 10533), True, 'import streamlit as st\n'), ((10550, 10572), 'streamlit.button', 'st.button', (['"""> execute"""'], {}), "('> execute')\n", (10559, 10572), True, 'import streamlit as st\n'), ((11090, 11130), 'streamlit.write', 'st.write', (['"""> Natural Language to Cypher"""'], {}), "('> Natural Language to Cypher')\n", (11098, 11130), True, 'import streamlit as st\n'), ((11153, 11252), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""Enter natural language query string"""', 'value': '"""Tell me about Peter Quill?"""'}), "(label='Enter natural language query string', value=\n 'Tell me about Peter Quill?')\n", (11166, 11252), True, 'import streamlit as st\n'), ((11269, 11288), 'streamlit.button', 'st.button', (['"""Ask KG"""'], {}), "('Ask KG')\n", (11278, 11288), True, 'import streamlit as st\n'), ((12205, 12303), 'streamlit.write', 'st.write', (['"""> Natural Language to NebulaGraph Cypher Code with Langchain and Llama Index"""'], {}), "(\n '> Natural Language to NebulaGraph Cypher Code with Langchain and Llama Index'\n )\n", (12213, 12303), True, 'import streamlit as st\n'), ((12344, 12381), 'streamlit.tabs', 'st.tabs', (["['Langchain', 'Llama Index']"], {}), "(['Langchain', 'Llama Index'])\n", (12351, 12381), True, 'import streamlit as st\n'), ((12569, 12885), 'streamlit.markdown', 'st.markdown', (['"""\n\n## References\n \n- [Langchain: NebulaGraphQAChain](https://python.langchain.com/docs/modules/chains/additional/graph_nebula_qa)\n- [Llama Index: KnowledgeGraphQueryEngine](https://gpt-index.readthedocs.io/en/latest/examples/query_engine/knowledge_graph_query_engine.html)\n"""'], {}), '(\n """\n\n## References\n \n- [Langchain: NebulaGraphQAChain](https://python.langchain.com/docs/modules/chains/additional/graph_nebula_qa)\n- [Llama Index: KnowledgeGraphQueryEngine](https://gpt-index.readthedocs.io/en/latest/examples/query_engine/knowledge_graph_query_engine.html)\n"""\n )\n', (12580, 12885), True, 'import streamlit as st\n'), ((10763, 
10786), 'streamlit.dataframe', 'st.dataframe', (['result_df'], {}), '(result_df)\n', (10775, 10786), True, 'import streamlit as st\n'), ((11009, 11064), 'streamlit.components.v1.html', 'components.html', (['graph_html'], {'height': '(500)', 'scrolling': '(True)'}), '(graph_html, height=500, scrolling=True)\n', (11024, 11064), True, 'import streamlit.components.v1 as components\n'), ((11585, 11616), 'streamlit.write', 'st.write', (['f"""*Answer*: {answer}"""'], {}), "(f'*Answer*: {answer}')\n", (11593, 11616), True, 'import streamlit as st\n'), ((11625, 11717), 'streamlit.markdown', 'st.markdown', (['f"""\n## Generated NebulaGraph Cypher Query\n```cypher\n{graph_query}\n```\n"""'], {}), '(\n f"""\n## Generated NebulaGraph Cypher Query\n```cypher\n{graph_query}\n```\n""")\n', (11636, 11717), True, 'import streamlit as st\n'), ((11743, 11772), 'streamlit.write', 'st.write', (['"""## Rendered Graph"""'], {}), "('## Rendered Graph')\n", (11751, 11772), True, 'import streamlit as st\n'), ((12118, 12173), 'streamlit.components.v1.html', 'components.html', (['graph_html'], {'height': '(500)', 'scrolling': '(True)'}), '(graph_html, height=500, scrolling=True)\n', (12133, 12173), True, 'import streamlit.components.v1 as components\n'), ((12414, 12471), 'streamlit.code', 'st.code', ([], {'body': 'CODE_NL2CYPHER_LANGCHAIN', 'language': '"""python"""'}), "(body=CODE_NL2CYPHER_LANGCHAIN, language='python')\n", (12421, 12471), True, 'import streamlit as st\n'), ((12505, 12563), 'streamlit.code', 'st.code', ([], {'body': 'CODE_NL2CYPHER_LLAMAINDEX', 'language': '"""python"""'}), "(body=CODE_NL2CYPHER_LLAMAINDEX, language='python')\n", (12512, 12563), True, 'import streamlit as st\n'), ((5250, 5279), 're.search', 're.search', (['"""RETURN .+"""', 'query'], {}), "('RETURN .+', query)\n", (5259, 5279), False, 'import re\n'), ((10968, 10991), 'random.randint', 'random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (10982, 10991), False, 'import random\n'), ((12077, 12100), 'random.randint', 'random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (12091, 12100), False, 'import random\n')]
import datetime
import uuid

from llama_index.core.memory import ChatMemoryBuffer


class Chat:
    def __init__(self, model):
        self.model = model
        if model.id is None:
            self.id = str(uuid.uuid4())
        else:
            self.id = model.id
        self.history = ChatMemoryBuffer.from_defaults(token_limit=3900)
        self.created = datetime.datetime.now()

    def clearHistory(self):
        self.history.reset()

    def __eq__(self, other):
        return self.id == other.id
[ "llama_index.core.memory.ChatMemoryBuffer.from_defaults" ]
[((293, 341), 'llama_index.core.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(3900)'}), '(token_limit=3900)\n', (323, 341), False, 'from llama_index.core.memory import ChatMemoryBuffer\n'), ((366, 389), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (387, 389), False, 'import datetime\n'), ((210, 222), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (220, 222), False, 'import uuid\n')]
from pathlib import Path
from llama_index import Document, SimpleDirectoryReader, download_loader
from llama_index.query_engine import RetrieverQueryEngine
from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores import PineconeVectorStore
import pinecone
import os
from llama_index.node_parser import SimpleNodeParser
import openai
from dotenv import load_dotenv
from os import getenv


def query_research(message):
    load_dotenv()
    # openai.api_key_path = getenv('OPENAI_API_KEY')

    # constructor
    # def __init__(
    #     self,
    #     api_key,
    #     api_username,
    #     openai_api_key=None,
    #     base_url='https://forum.subspace.network',
    #     verbose=True,
    # ):

    # load PDF
    # PDFReader = download_loader("PDFReader")
    # loader = PDFReader()
    # docs = loader.load_data(file=Path('../../data/whitepaper.pdf'))

    docs = SimpleDirectoryReader('/Users/ryanyeung/Code/Crypto/SupportGPT/supportgpt/sources/data').load_data()

    # parse PDF
    parser = SimpleNodeParser()
    nodes = parser.get_nodes_from_documents(docs)

    # initialize connection to pinecone
    # pinecone.init(
    #     getenv('PINECONE_API_KEY'),
    #     getenv('PINECONE_ENVIRONMENT'),
    # )
    pinecone.init(
        api_key=os.environ['PINECONE_API_KEY'],
        environment=os.environ['PINECONE_ENVIRONMENT']
    )

    # create the index if it does not exist already
    index_name = 'research-test'
    if index_name not in pinecone.list_indexes():
        pinecone.create_index(
            index_name,
            dimension=1536,
            metric='cosine'
        )

    # connect to the index
    pinecone_index = pinecone.Index(index_name)

    # we can select a namespace (acts as a partition in an index)
    namespace = ''  # default namespace

    vector_store = PineconeVectorStore(pinecone_index=pinecone_index)

    # setup our storage (vector db)
    storage_context = StorageContext.from_defaults(
        vector_store=vector_store
    )

    # setup the index/query process, ie the embedding model (and completion if used)
    embed_model = OpenAIEmbedding(model='text-embedding-ada-002', embed_batch_size=100)
    service_context = ServiceContext.from_defaults(embed_model=embed_model)

    index = GPTVectorStoreIndex.from_documents(
        docs, storage_context=storage_context,
        service_context=service_context
    )

    # retriever = index.as_retriever(retriever_mode='default')
    # query_engine = RetrieverQueryEngine(retriever)
    # #query_engine = RetrieverQueryEngine.from_args(retriever, response_mode='default')

    query_engine = index.as_query_engine()
    res = query_engine.query(message)
    return str(res)
    # print(str(res))
    # print(res.get_formatted_sources())

    # pinecone.delete_index(index_name)
[ "llama_index.GPTVectorStoreIndex.from_documents", "llama_index.vector_stores.PineconeVectorStore", "llama_index.node_parser.SimpleNodeParser", "llama_index.ServiceContext.from_defaults", "llama_index.StorageContext.from_defaults", "llama_index.SimpleDirectoryReader", "llama_index.embeddings.openai.OpenAIEmbedding" ]
[((531, 544), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (542, 544), False, 'from dotenv import load_dotenv\n'), ((1142, 1160), 'llama_index.node_parser.SimpleNodeParser', 'SimpleNodeParser', ([], {}), '()\n', (1158, 1160), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((1366, 1472), 'pinecone.init', 'pinecone.init', ([], {'api_key': "os.environ['PINECONE_API_KEY']", 'environment': "os.environ['PINECONE_ENVIRONMENT']"}), "(api_key=os.environ['PINECONE_API_KEY'], environment=os.\n environ['PINECONE_ENVIRONMENT'])\n", (1379, 1472), False, 'import pinecone\n'), ((1804, 1830), 'pinecone.Index', 'pinecone.Index', (['index_name'], {}), '(index_name)\n', (1818, 1830), False, 'import pinecone\n'), ((1965, 2015), 'llama_index.vector_stores.PineconeVectorStore', 'PineconeVectorStore', ([], {'pinecone_index': 'pinecone_index'}), '(pinecone_index=pinecone_index)\n', (1984, 2015), False, 'from llama_index.vector_stores import PineconeVectorStore\n'), ((2076, 2131), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (2104, 2131), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((2249, 2318), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""', 'embed_batch_size': '(100)'}), "(model='text-embedding-ada-002', embed_batch_size=100)\n", (2264, 2318), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((2341, 2394), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model'}), '(embed_model=embed_model)\n', (2369, 2394), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((2408, 2518), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['docs'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(docs, storage_context=storage_context,\n service_context=service_context)\n', (2442, 2518), False, 'from llama_index import GPTVectorStoreIndex, StorageContext, ServiceContext\n'), ((1609, 1632), 'pinecone.list_indexes', 'pinecone.list_indexes', ([], {}), '()\n', (1630, 1632), False, 'import pinecone\n'), ((1642, 1708), 'pinecone.create_index', 'pinecone.create_index', (['index_name'], {'dimension': '(1536)', 'metric': '"""cosine"""'}), "(index_name, dimension=1536, metric='cosine')\n", (1663, 1708), False, 'import pinecone\n'), ((1013, 1106), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""/Users/ryanyeung/Code/Crypto/SupportGPT/supportgpt/sources/data"""'], {}), "(\n '/Users/ryanyeung/Code/Crypto/SupportGPT/supportgpt/sources/data')\n", (1034, 1106), False, 'from llama_index import Document, SimpleDirectoryReader, download_loader\n')]
from llama_index.node_parser import SimpleNodeParser
from typing import *
from llama_index.data_structs import Node
import requests
from collections import defaultdict
from llama_index import Document
from config import config


def load_and_parse(all_docs):
    documents = []
    for file_row in all_docs:
        url = file_row["url"]
        content = file_row["text"]
        images = file_row['images']
        metadata = defaultdict()
        metadata['URL'] = url
        metadata['images'] = images
        body_text = content
        documents.append(Document(text=body_text, metadata=dict(metadata)))
    return documents


def reader(urls, imgs_links):
    all_pages = []
    for url in urls:
        try:
            res = requests.get(url, timeout=10)
        except:
            continue
        if res.status_code == 200:
            all_pages.append((url, res.text, imgs_links))
    return all_pages


def convert_documents_into_nodes(documents):
    all_nodes = []
    for document in documents:
        parser = SimpleNodeParser.from_defaults(
            chunk_size=config.node_chunk_size, chunk_overlap=50)
        nodes = parser.get_nodes_from_documents([document])
        all_nodes.extend(nodes)
    return all_nodes
[ "llama_index.node_parser.SimpleNodeParser.from_defaults" ]
[((430, 443), 'collections.defaultdict', 'defaultdict', ([], {}), '()\n', (441, 443), False, 'from collections import defaultdict\n'), ((1033, 1120), 'llama_index.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': 'config.node_chunk_size', 'chunk_overlap': '(50)'}), '(chunk_size=config.node_chunk_size,\n chunk_overlap=50)\n', (1063, 1120), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((738, 767), 'requests.get', 'requests.get', (['url'], {'timeout': '(10)'}), '(url, timeout=10)\n', (750, 767), False, 'import requests\n')]
from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, StorageContext, ServiceContext, GPTVectorStoreIndex, load_index_from_storage
from langchain.chat_models import ChatOpenAI
import gradio as gr
import sys
import os
import openai

openai.api_base = "https://api.app4gpt.com/v1"
os.environ["OPENAI_API_KEY"] = 'you-API-KEY'


def create_service_context():
    # constraint parameters
    max_input_size = 4096
    num_outputs = 3072
    max_chunk_overlap = 20
    chunk_size_limit = 600

    # allows the user to explicitly set certain constraint parameters
    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)

    # LLMPredictor is a wrapper class around LangChain's LLMChain that allows easy integration into LlamaIndex
    llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo", max_tokens=num_outputs))

    # constructs service_context
    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
    return service_context


def data_ingestion_indexing(directory_path):
    # loads data from the specified directory path
    documents = SimpleDirectoryReader(directory_path).load_data()

    # when first building the index
    index = GPTVectorStoreIndex.from_documents(
        documents, service_context=create_service_context()
    )

    # persist index to disk, default "storage" folder
    index.storage_context.persist()
    return index


def data_querying(input_text):
    # rebuild storage context
    storage_context = StorageContext.from_defaults(persist_dir="./storage")

    # loads index from storage
    index = load_index_from_storage(storage_context, service_context=create_service_context())

    # queries the index with the input text
    response = index.as_query_engine().query(input_text)
    return response.response


iface = gr.Interface(fn=data_querying,
             inputs=gr.components.Textbox(lines=7, label="Enter your question"),
             outputs="text",
             title="Custom-Pdf Demo by Gpt4")

# passes in data directory
index = data_ingestion_indexing("data")
iface.launch(share=False)
[ "llama_index.PromptHelper", "llama_index.ServiceContext.from_defaults", "llama_index.StorageContext.from_defaults", "llama_index.SimpleDirectoryReader" ]
[((597, 696), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_outputs', 'max_chunk_overlap'], {'chunk_size_limit': 'chunk_size_limit'}), '(max_input_size, num_outputs, max_chunk_overlap,\n chunk_size_limit=chunk_size_limit)\n', (609, 696), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, StorageContext, ServiceContext, GPTVectorStoreIndex, load_index_from_storage\n'), ((977, 1068), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (1005, 1068), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, StorageContext, ServiceContext, GPTVectorStoreIndex, load_index_from_storage\n'), ((1601, 1654), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (1629, 1654), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, StorageContext, ServiceContext, GPTVectorStoreIndex, load_index_from_storage\n'), ((1988, 2047), 'gradio.components.Textbox', 'gr.components.Textbox', ([], {'lines': '(7)', 'label': '"""Enter your question"""'}), "(lines=7, label='Enter your question')\n", (2009, 2047), True, 'import gradio as gr\n'), ((841, 920), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.5)', 'model_name': '"""gpt-3.5-turbo"""', 'max_tokens': 'num_outputs'}), "(temperature=0.5, model_name='gpt-3.5-turbo', max_tokens=num_outputs)\n", (851, 920), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1205, 1242), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['directory_path'], {}), '(directory_path)\n', (1226, 1242), False, 'from llama_index import SimpleDirectoryReader, LLMPredictor, PromptHelper, StorageContext, ServiceContext, GPTVectorStoreIndex, load_index_from_storage\n')]
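A hedged smoke test for the two functions above, sketched separately from the script itself (which launches Gradio when run); the data directory, API key, and question are placeholders.

# Assumes OPENAI_API_KEY is valid and ./data holds at least one readable document.
data_ingestion_indexing("data")                       # builds and persists ./storage
print(data_querying("What is this document about?"))  # queries the persisted index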
from llama_index.core.llms import ChatMessage
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.core.prompts import PromptTemplate
from projectgurukul.custom_models import model_utils
import logging


def get_tinyllama_llm(context_window=2048, max_new_tokens=256, system_prompt=""):
    def messages_to_prompt(messages: ChatMessage):
        messages_dict = [
            {"role": message.role.value, "content": message.content}
            for message in messages
        ]
        prompt = huggingllm._tokenizer.apply_chat_template(
            messages_dict, tokenize=False, add_generation_prompt=True)
        logging.debug(prompt)
        return prompt

    device, dtype = model_utils.get_device_and_dtype()

    # This will wrap the default prompts that are internal to llama-index
    query_wrapper_prompt = PromptTemplate(
        f"<|system|>{system_prompt}" + "<|user|>{query_str}<|assistant|>")

    huggingllm = HuggingFaceLLM(
        context_window=context_window,
        is_chat_model=True,
        model_name="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        tokenizer_name="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        max_new_tokens=max_new_tokens,
        stopping_ids=[2, 50256],
        generate_kwargs={'do_sample': False},
        model_kwargs={"torch_dtype": dtype},
        query_wrapper_prompt=query_wrapper_prompt,
        device_map=device,
        system_prompt=system_prompt
    )
    huggingllm.messages_to_prompt = messages_to_prompt
    return huggingllm


def get_phi2_llm(context_window=2048, max_new_tokens=256, system_prompt=""):
    role_maps = {
        "system": "Instructions",
        "user": "User Instructions"
    }

    def messages_to_prompt(messages: ChatMessage):
        prompt = ""
        for message in messages:
            role = message.role.value
            role = role_maps[role] if role in role_maps else role
            prompt += f"\n{role}:: {message.content}\n\n"
        prompt += "Response ::"
        logging.debug(prompt)
        return prompt

    device, dtype = model_utils.get_device_and_dtype()

    # This will wrap the default prompts that are internal to llama-index
    query_wrapper_prompt = PromptTemplate("Instruct: {query_str}\nOutput: ")

    huggingllm = HuggingFaceLLM(
        context_window=context_window,
        is_chat_model=True,
        model_name="microsoft/phi-2",
        tokenizer_name="microsoft/phi-2",
        max_new_tokens=max_new_tokens,
        stopping_ids=[50256],
        generate_kwargs={'do_sample': False},
        model_kwargs={"torch_dtype": dtype, "trust_remote_code": True},
        query_wrapper_prompt=query_wrapper_prompt,
        messages_to_prompt=messages_to_prompt,
        device_map=device,
        system_prompt=system_prompt
    )
    return huggingllm
[ "llama_index.llms.huggingface.HuggingFaceLLM", "llama_index.core.prompts.PromptTemplate" ]
[((720, 754), 'projectgurukul.custom_models.model_utils.get_device_and_dtype', 'model_utils.get_device_and_dtype', ([], {}), '()\n', (752, 754), False, 'from projectgurukul.custom_models import model_utils\n'), ((857, 942), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (["(f'<|system|>{system_prompt}' + '<|user|>{query_str}<|assistant|>')"], {}), "(f'<|system|>{system_prompt}' +\n '<|user|>{query_str}<|assistant|>')\n", (871, 942), False, 'from llama_index.core.prompts import PromptTemplate\n'), ((964, 1375), 'llama_index.llms.huggingface.HuggingFaceLLM', 'HuggingFaceLLM', ([], {'context_window': 'context_window', 'is_chat_model': '(True)', 'model_name': '"""TinyLlama/TinyLlama-1.1B-Chat-v1.0"""', 'tokenizer_name': '"""TinyLlama/TinyLlama-1.1B-Chat-v1.0"""', 'max_new_tokens': 'max_new_tokens', 'stopping_ids': '[2, 50256]', 'generate_kwargs': "{'do_sample': False}", 'model_kwargs': "{'torch_dtype': dtype}", 'query_wrapper_prompt': 'query_wrapper_prompt', 'device_map': 'device', 'system_prompt': 'system_prompt'}), "(context_window=context_window, is_chat_model=True,\n model_name='TinyLlama/TinyLlama-1.1B-Chat-v1.0', tokenizer_name=\n 'TinyLlama/TinyLlama-1.1B-Chat-v1.0', max_new_tokens=max_new_tokens,\n stopping_ids=[2, 50256], generate_kwargs={'do_sample': False},\n model_kwargs={'torch_dtype': dtype}, query_wrapper_prompt=\n query_wrapper_prompt, device_map=device, system_prompt=system_prompt)\n", (978, 1375), False, 'from llama_index.llms.huggingface import HuggingFaceLLM\n'), ((2079, 2113), 'projectgurukul.custom_models.model_utils.get_device_and_dtype', 'model_utils.get_device_and_dtype', ([], {}), '()\n', (2111, 2113), False, 'from projectgurukul.custom_models import model_utils\n'), ((2216, 2268), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""Instruct: {query_str}\nOutput: """'], {}), '("""Instruct: {query_str}\nOutput: """)\n', (2230, 2268), False, 'from llama_index.core.prompts import PromptTemplate\n'), ((2284, 2724), 'llama_index.llms.huggingface.HuggingFaceLLM', 'HuggingFaceLLM', ([], {'context_window': 'context_window', 'is_chat_model': '(True)', 'model_name': '"""microsoft/phi-2"""', 'tokenizer_name': '"""microsoft/phi-2"""', 'max_new_tokens': 'max_new_tokens', 'stopping_ids': '[50256]', 'generate_kwargs': "{'do_sample': False}", 'model_kwargs': "{'torch_dtype': dtype, 'trust_remote_code': True}", 'query_wrapper_prompt': 'query_wrapper_prompt', 'messages_to_prompt': 'messages_to_prompt', 'device_map': 'device', 'system_prompt': 'system_prompt'}), "(context_window=context_window, is_chat_model=True,\n model_name='microsoft/phi-2', tokenizer_name='microsoft/phi-2',\n max_new_tokens=max_new_tokens, stopping_ids=[50256], generate_kwargs={\n 'do_sample': False}, model_kwargs={'torch_dtype': dtype,\n 'trust_remote_code': True}, query_wrapper_prompt=query_wrapper_prompt,\n messages_to_prompt=messages_to_prompt, device_map=device, system_prompt\n =system_prompt)\n", (2298, 2724), False, 'from llama_index.llms.huggingface import HuggingFaceLLM\n'), ((654, 675), 'logging.debug', 'logging.debug', (['prompt'], {}), '(prompt)\n', (667, 675), False, 'import logging\n'), ((2015, 2036), 'logging.debug', 'logging.debug', (['prompt'], {}), '(prompt)\n', (2028, 2036), False, 'import logging\n')]
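A usage sketch for the factory functions above; the system prompt and question are made up, and it assumes the TinyLlama weights can be downloaded and fit on the detected device.

llm = get_tinyllama_llm(system_prompt="You are a concise assistant.")
# `complete` returns a CompletionResponse whose `.text` holds the generated string.
response = llm.complete("Explain what a vector index is in one sentence.")
print(response.text)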
from llama_index.multi_modal_llms import GeminiMultiModal from llama_index.program import MultiModalLLMCompletionProgram from llama_index.output_parsers import PydanticOutputParser from llama_index.multi_modal_llms.openai import OpenAIMultiModal from pydantic import BaseModel, Field from typing_extensions import Annotated damages_initial_prompt_str = """ The images are of a damaged {make_name} {model_name} {year} car. The images are taken from different angles. Please analyze them and tell me what parts are damaged and what is the estimated cost of repair. """ conditions_report_initial_prompt_str = """ The images are of a damaged vehicle. I need to fill a vehicle condition report based on the picture(s). Please fill the following details based on the image(s): FRONT 1. Roof 2. Windshield 3. Hood 4. Grill 5. Front bumper 6. Right mirror 7. Left mirror 8. Front right light 9. Front left light BACK 10. Rear Window 11. Trunk/TGate 12. Trunk/Cargo area 13. Rear bumper 14. Tail lights DRIVERS SIDE 15. Left fender 16. Left front door 17. Left rear door 18. Left rear quarter panel PASSENGER SIDE 19. Right rear quarter 20. Right rear door 21. Right front door 22. Right fender TIRES T1. Front left tire T2. Front right tire T3. Rear left tire T4. Rear right tire For each of the details you must answer with a score based on this descriptions to reflect the condition: - 0: Not visible - 1: Seems OK (no damage) - 2: Minor damage (scratches, dents) - 3: Major damage (bent, broken, missing) """ class DamagedPart(BaseModel): """Data model of the damaged part""" part_name: str = Field(..., description="Name of the damaged part") cost: float = Field(..., description="Estimated cost of repair") class DamagedParts(BaseModel): """Data model of the damaged parts""" damaged_parts: list[DamagedPart] = Field(..., description="List of damaged parts") summary: str = Field(..., description="Summary of the damage") class ConditionsReport(BaseModel): """Data model of conditions report""" roof: Annotated[int, Field(0, ge=0, le=3, description="Roof condition")] windshield: Annotated[int, Field(0, ge=0, le=3, description="Windshield condition")] hood: Annotated[int, Field(0, ge=0, le=3, description="Hood condition")] grill: Annotated[int, Field(0, ge=0, le=3, description="Grill condition")] front_bumper: Annotated[ int, Field(0, ge=0, le=3, description="Front bumper condition") ] right_mirror: Annotated[ int, Field(0, ge=0, le=3, description="Right mirror condition") ] left_mirror: Annotated[ int, Field(0, ge=0, le=3, description="Left mirror condition") ] front_right_light: Annotated[ int, Field(0, ge=0, le=3, description="Front right light condition") ] front_left_light: Annotated[ int, Field(0, ge=0, le=3, description="Front left light condition") ] # back rear_window: Annotated[ int, Field(0, ge=0, le=3, description="Rear window condition") ] trunk_tgate: Annotated[ int, Field(0, ge=0, le=3, description="Trunk/TGate condition") ] trunk_cargo_area: Annotated[ int, Field(0, ge=0, le=3, description="Trunk/Cargo area condition") ] rear_bumper: Annotated[ int, Field(0, ge=0, le=3, description="Rear bumper condition") ] right_tail_light: Annotated[ int, Field(0, ge=0, le=3, description="Right tail light condition") ] left_tail_light: Annotated[ int, Field(0, ge=0, le=3, description="Left tail light condition") ] # left left_rear_quarter: Annotated[ int, Field(0, ge=0, le=3, description="Left rear quarter condition") ] left_rear_door: Annotated[ int, Field(0, ge=0, le=3, description="Left rear door condition") ] 
left_front_door: Annotated[ int, Field(0, ge=0, le=3, description="Left front door condition") ] left_fender: Annotated[ int, Field(0, ge=0, le=3, description="Left fender condition") ] left_front_tire: Annotated[ int, Field(0, ge=0, le=3, description="Left front tire condition") ] left_rear_tire: Annotated[ int, Field(0, ge=0, le=3, description="Left rear tire condition") ] # right right_rear_quarter: Annotated[ int, Field(0, ge=0, le=3, description="Right rear quarter condition") ] right_rear_door: Annotated[ int, Field(0, ge=0, le=3, description="Right rear door condition") ] right_front_door: Annotated[ int, Field(0, ge=0, le=3, description="Right front door condition") ] right_fender: Annotated[ int, Field(0, ge=0, le=3, description="Right fender condition") ] right_front_tire: Annotated[ int, Field(0, ge=0, le=3, description="Right front tire condition") ] right_rear_tire: Annotated[ int, Field(0, ge=0, le=3, description="Right rear tire condition") ] def pydantic_llm( output_class, image_documents, prompt_template_str, selected_llm_model ): openai_mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview") gemini_llm = GeminiMultiModal(model_name="models/gemini-pro-vision") multi_modal_llm = gemini_llm if selected_llm_model == "OpenAI": multi_modal_llm = openai_mm_llm llm_program = MultiModalLLMCompletionProgram.from_defaults( output_parser=PydanticOutputParser(output_class), image_documents=image_documents, prompt_template_str=prompt_template_str, multi_modal_llm=multi_modal_llm, verbose=True, ) response = llm_program() return response
[ "llama_index.multi_modal_llms.GeminiMultiModal", "llama_index.multi_modal_llms.openai.OpenAIMultiModal", "llama_index.output_parsers.PydanticOutputParser" ]
[((1607, 1657), 'pydantic.Field', 'Field', (['...'], {'description': '"""Name of the damaged part"""'}), "(..., description='Name of the damaged part')\n", (1612, 1657), False, 'from pydantic import BaseModel, Field\n'), ((1676, 1726), 'pydantic.Field', 'Field', (['...'], {'description': '"""Estimated cost of repair"""'}), "(..., description='Estimated cost of repair')\n", (1681, 1726), False, 'from pydantic import BaseModel, Field\n'), ((1842, 1889), 'pydantic.Field', 'Field', (['...'], {'description': '"""List of damaged parts"""'}), "(..., description='List of damaged parts')\n", (1847, 1889), False, 'from pydantic import BaseModel, Field\n'), ((1909, 1956), 'pydantic.Field', 'Field', (['...'], {'description': '"""Summary of the damage"""'}), "(..., description='Summary of the damage')\n", (1914, 1956), False, 'from pydantic import BaseModel, Field\n'), ((5072, 5118), 'llama_index.multi_modal_llms.openai.OpenAIMultiModal', 'OpenAIMultiModal', ([], {'model': '"""gpt-4-vision-preview"""'}), "(model='gpt-4-vision-preview')\n", (5088, 5118), False, 'from llama_index.multi_modal_llms.openai import OpenAIMultiModal\n'), ((5136, 5191), 'llama_index.multi_modal_llms.GeminiMultiModal', 'GeminiMultiModal', ([], {'model_name': '"""models/gemini-pro-vision"""'}), "(model_name='models/gemini-pro-vision')\n", (5152, 5191), False, 'from llama_index.multi_modal_llms import GeminiMultiModal\n'), ((2062, 2112), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Roof condition"""'}), "(0, ge=0, le=3, description='Roof condition')\n", (2067, 2112), False, 'from pydantic import BaseModel, Field\n'), ((2145, 2201), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Windshield condition"""'}), "(0, ge=0, le=3, description='Windshield condition')\n", (2150, 2201), False, 'from pydantic import BaseModel, Field\n'), ((2228, 2278), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Hood condition"""'}), "(0, ge=0, le=3, description='Hood condition')\n", (2233, 2278), False, 'from pydantic import BaseModel, Field\n'), ((2306, 2357), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Grill condition"""'}), "(0, ge=0, le=3, description='Grill condition')\n", (2311, 2357), False, 'from pydantic import BaseModel, Field\n'), ((2401, 2459), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Front bumper condition"""'}), "(0, ge=0, le=3, description='Front bumper condition')\n", (2406, 2459), False, 'from pydantic import BaseModel, Field\n'), ((2508, 2566), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Right mirror condition"""'}), "(0, ge=0, le=3, description='Right mirror condition')\n", (2513, 2566), False, 'from pydantic import BaseModel, Field\n'), ((2614, 2671), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Left mirror condition"""'}), "(0, ge=0, le=3, description='Left mirror condition')\n", (2619, 2671), False, 'from pydantic import BaseModel, Field\n'), ((2725, 2788), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Front right light condition"""'}), "(0, ge=0, le=3, description='Front right light condition')\n", (2730, 2788), False, 'from pydantic import BaseModel, Field\n'), ((2841, 2903), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Front left light condition"""'}), "(0, ge=0, le=3, description='Front left 
light condition')\n", (2846, 2903), False, 'from pydantic import BaseModel, Field\n'), ((2962, 3019), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Rear window condition"""'}), "(0, ge=0, le=3, description='Rear window condition')\n", (2967, 3019), False, 'from pydantic import BaseModel, Field\n'), ((3067, 3124), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Trunk/TGate condition"""'}), "(0, ge=0, le=3, description='Trunk/TGate condition')\n", (3072, 3124), False, 'from pydantic import BaseModel, Field\n'), ((3177, 3239), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Trunk/Cargo area condition"""'}), "(0, ge=0, le=3, description='Trunk/Cargo area condition')\n", (3182, 3239), False, 'from pydantic import BaseModel, Field\n'), ((3287, 3344), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Rear bumper condition"""'}), "(0, ge=0, le=3, description='Rear bumper condition')\n", (3292, 3344), False, 'from pydantic import BaseModel, Field\n'), ((3397, 3459), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Right tail light condition"""'}), "(0, ge=0, le=3, description='Right tail light condition')\n", (3402, 3459), False, 'from pydantic import BaseModel, Field\n'), ((3511, 3572), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Left tail light condition"""'}), "(0, ge=0, le=3, description='Left tail light condition')\n", (3516, 3572), False, 'from pydantic import BaseModel, Field\n'), ((3637, 3700), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Left rear quarter condition"""'}), "(0, ge=0, le=3, description='Left rear quarter condition')\n", (3642, 3700), False, 'from pydantic import BaseModel, Field\n'), ((3751, 3811), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Left rear door condition"""'}), "(0, ge=0, le=3, description='Left rear door condition')\n", (3756, 3811), False, 'from pydantic import BaseModel, Field\n'), ((3863, 3924), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Left front door condition"""'}), "(0, ge=0, le=3, description='Left front door condition')\n", (3868, 3924), False, 'from pydantic import BaseModel, Field\n'), ((3972, 4029), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Left fender condition"""'}), "(0, ge=0, le=3, description='Left fender condition')\n", (3977, 4029), False, 'from pydantic import BaseModel, Field\n'), ((4081, 4142), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Left front tire condition"""'}), "(0, ge=0, le=3, description='Left front tire condition')\n", (4086, 4142), False, 'from pydantic import BaseModel, Field\n'), ((4193, 4253), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Left rear tire condition"""'}), "(0, ge=0, le=3, description='Left rear tire condition')\n", (4198, 4253), False, 'from pydantic import BaseModel, Field\n'), ((4320, 4384), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Right rear quarter condition"""'}), "(0, ge=0, le=3, description='Right rear quarter condition')\n", (4325, 4384), False, 'from pydantic import BaseModel, Field\n'), ((4436, 4497), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Right rear door condition"""'}), "(0, 
ge=0, le=3, description='Right rear door condition')\n", (4441, 4497), False, 'from pydantic import BaseModel, Field\n'), ((4550, 4612), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Right front door condition"""'}), "(0, ge=0, le=3, description='Right front door condition')\n", (4555, 4612), False, 'from pydantic import BaseModel, Field\n'), ((4661, 4719), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Right fender condition"""'}), "(0, ge=0, le=3, description='Right fender condition')\n", (4666, 4719), False, 'from pydantic import BaseModel, Field\n'), ((4772, 4834), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Right front tire condition"""'}), "(0, ge=0, le=3, description='Right front tire condition')\n", (4777, 4834), False, 'from pydantic import BaseModel, Field\n'), ((4886, 4947), 'pydantic.Field', 'Field', (['(0)'], {'ge': '(0)', 'le': '(3)', 'description': '"""Right rear tire condition"""'}), "(0, ge=0, le=3, description='Right rear tire condition')\n", (4891, 4947), False, 'from pydantic import BaseModel, Field\n'), ((5393, 5427), 'llama_index.output_parsers.PydanticOutputParser', 'PydanticOutputParser', (['output_class'], {}), '(output_class)\n', (5413, 5427), False, 'from llama_index.output_parsers import PydanticOutputParser\n')]
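A hypothetical driver for the multi-modal program above: it reads a folder of damage photos and requests a structured ConditionsReport. The folder path is an assumption, SimpleDirectoryReader is assumed to load the images as image documents in this llama_index version, and the relevant OpenAI/Gemini API keys are presumed to be configured.

from llama_index import SimpleDirectoryReader  # assumed importable in this llama_index version

image_documents = SimpleDirectoryReader("./damage_photos").load_data()  # hypothetical photo folder
report = pydantic_llm(
    ConditionsReport,
    image_documents,
    conditions_report_initial_prompt_str,
    selected_llm_model="OpenAI",
)
print(report)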
import streamlit as st import pandas as pd import os from langchain.chat_models import ChatOpenAI from langchain.prompts.chat import ( ChatPromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate, ) from llama_index import ( SimpleDirectoryReader, VectorStoreIndex, ServiceContext, ) from llama_index.llms import LlamaCPP from llama_index.llms.llama_utils import ( messages_to_prompt, completion_to_prompt, ) import subprocess import time # set version # st.session_state.demo_lite = False # initialize model # llm = "tbd" print("BP 4 ") # initialize model- get 11m depending on st.session_state.demo_lite, and model def init_llm(model, demo_lite): # st.write("BP 4.1: model: ", model) if demo_lite == False: print("BP 5 : running full demo") if model == "Llama2-7b_CPP": model_path = "/Users/dheym/Library/CloudStorage/OneDrive-Personal/Documents/side_projects/GRDN/src/models/llama-2-7b-chat.Q4_K_M.gguf" print("model path: ", model_path) llm = LlamaCPP( # You can pass in the URL to a GGML model to download it automatically # model_url=model_url, # optionally, you can set the path to a pre-downloaded model instead of model_url model_path=model_path, temperature=0.1, max_new_tokens=1000, # llama2 has a context window of 4096 tokens, but we set it lower to allow for some wiggle room context_window=3000, # kwargs to pass to __call__() generate_kwargs={}, # kwargs to pass to __init__() # set to at least 1 to use GPU model_kwargs={"n_gpu_layers": 10}, # transform inputs into Llama2 format messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, verbose=True, ) elif model == "deci-7b_CPP": model_path = "/Users/dheym/Library/CloudStorage/OneDrive-Personal/Documents/side_projects/GRDN/src/models/decilm-7b-uniform-gqa-q8_0.gguf" print("model path: ", model_path) llm = LlamaCPP( # You can pass in the URL to a GGML model to download it automatically # model_url=model_url, # optionally, you can set the path to a pre-downloaded model instead of model_url model_path=model_path, # model_url = "https://huggingface.co/Deci/DeciLM-7B-instruct-GGUF/resolve/main/decilm-7b-uniform-gqa-q8_0.gguf", temperature=0.1, max_new_tokens=1000, # llama2 has a context window of 4096 tokens, but we set it lower to allow for some wiggle room context_window=3000, # kwargs to pass to __call__() generate_kwargs={}, # kwargs to pass to __init__() # set to at least 1 to use GPU model_kwargs={"n_gpu_layers": 1}, # transform inputs into Llama2 format # messages_to_prompt=messages_to_prompt, # completion_to_prompt=completion_to_prompt, verbose=True, ) else: print("Error with chatbot model") return None return llm def parse_and_evaluate_text(text): # Find the indices of the opening and closing brackets opening_bracket_index = text.find("[") closing_bracket_index = text.find("]") if opening_bracket_index != -1 and closing_bracket_index != -1: # Extract the text within the brackets extracted_list = ( "[" + text[opening_bracket_index + 1 : closing_bracket_index] + "]" ) # Return the evaluated text list return eval(extracted_list) else: print("Error with parsing plant list") return None def chat_response(template, prompt_text, model, demo_lite): if model == "openai-gpt35turbo": chat = ChatOpenAI(temperature=0.1) system_message_prompt = SystemMessagePromptTemplate.from_template(template) human_template = "{text}" human_message_prompt = HumanMessagePromptTemplate.from_template(human_template) chat_prompt = ChatPromptTemplate.from_messages( [system_message_prompt, human_message_prompt] ) 
response = chat(chat_prompt.format_prompt(text=prompt_text).to_messages()) return response # return response.content elif model == "Llama2-7b_CPP" or model == "deci-7b_CPP": print("BP 5.1: running full demo, model: ", model) if "llm" not in st.session_state: st.session_state.llm = init_llm(model, demo_lite) response = st.session_state.llm.complete(template + prompt_text) return response.text else: print("Error with chatbot model") return None # # get the plant list from user input # def get_plant_list(input_plant_text, model): # template="You are a helpful assistant that knows all about gardening and plants and python data structures." # text = 'which of the elements of this list can be grown in a garden, [' + input_plant_text + ']? Return JUST a python list object containing the elements that can be grown in a garden. Do not include any other text or explanation.' # plant_list_text = chat_response(template, text, model) # plant_list = parse_and_evaluate_text(plant_list_text.content) # print(plant_list) # return plant_list # get plant care tips based on plant list def get_plant_care_tips(plant_list, model, demo_lite): plant_care_tips = "" template = "You are a helpful assistant that knows all about gardening, plants, and companion planting." text = ( "from this list of plants, [" + str(st.session_state.input_plants_raw) + "], generate 1-2 plant care tips for each plant based on what you know. Return just the plant care tips in HTML markdown format. Make sure to use ### for headers. Do not include any other text or explanation before or after the markdown. It must be in HTML markdown format." ) if model == "deci-7b_CPP": template = ( "### System: \n\n You are a helpful assistant that knows all about gardening, plants, and companion planting." + "\n\n ### User: Generate gardening tips. Return just the plant care tips in HTML markdown format. Make sure to use ### for headers. Do not include any other text or explanation before or after the markdown. It must be in HTML markdown format. \n\n" ) text = "### Assistant: \n\n" print("deci-7b_CPP") plant_care_tips = chat_response(template, text, model, demo_lite) # check to see if response contains ### or < for headers print("BP6", plant_care_tips) # st.write(plant_care_tips) if ( "###" not in plant_care_tips and "<" not in plant_care_tips and model != "deci-7b_CPP" ): # deci-7b_CPP has more general plant care tips st.write(plant_care_tips) print("Error with parsing plant care tips") # try again up to 5 times for i in range(5): print( "Error with parsing plant care tips. 
Trying for attempt #" + str(i + 1) ) plant_care_tips = chat_response(template, text, model, demo_lite) # check to see if response contains ### for headers if "###" not in plant_care_tips and "<" not in plant_care_tips: continue else: break # remove any text before the first ### or < in the response print(plant_care_tips) # look for either # or < for headers if "###" in plant_care_tips: plant_care_tips = "\n\n" + plant_care_tips[plant_care_tips.find("###") :] elif "<" in plant_care_tips: plant_care_tips = "\n\n" + plant_care_tips[plant_care_tips.find("<") :] else: print("funky formatting") plant_care_tips = plant_care_tips print(plant_care_tips) return plant_care_tips # get compatability matrix for companion planting def get_compatibility_matrix(plant_list, model, demo_lite): # Convert the compatibility matrix to a string with open("data/compatibilities_text.txt", "r") as file: # Read the contents of the file compatibility_text = file.read() plant_comp_context = compatibility_text template = "You are a helpful assistant that knows all about gardening, companion planting, and python data structures- specifically compatibility matrices." text = ( "from this list of plants, [" + str(plant_list) + "], Return JUST a python array (with values separated by commas like this: [[0,1],[1,0]]\n\n ) for companion plant compatibility. Each row and column should represent plants, and the element of the array will contain a -1, 0, or 1 depending on if the relationship between plants is antagonists, neutral, or companions, respectively. You must refer to this knowledge base of information on plant compatibility: \n\n, " + plant_comp_context + "\n\n A plant's compatibility with itself is always 0. Do not include any other text or explanation." ) compatibility_mat = chat_response(template, text, model, demo_lite) # Find the indices of the opening and closing brackets opening_bracket_index = compatibility_mat.content.find("[[") closing_bracket_index = compatibility_mat.content.find("]]") if opening_bracket_index != -1 and closing_bracket_index != -1: # Extract the text within the brackets extracted_mat = ( "[" + compatibility_mat.content[ opening_bracket_index + 1 : closing_bracket_index ] + "]]" ) # Return the evaluated mat # check to see if compatiblity matrix only contains values of -1, 0, or 1 if eval(extracted_mat).count("0") + eval(extracted_mat).count("1") == len( eval(extracted_mat) ): # continue pass else: # try again up to 5 times for i in range(5): print( "Error with parsing plant compatibility matrix. Trying for attempt #" + str(i + 1) ) print(extracted_mat) extracted_mat = chat_response( template + "remember, it MUST ONLY CONTAIN -1s, 0s, and 1s, like this structure: [[0,1],[1,0]]", text, model, demo_lite, ) # Extract the text within the brackets extracted_mat = ( "[" + compatibility_mat.content[ opening_bracket_index + 1 : closing_bracket_index ] + "]]" ) print(extracted_mat) total_count = 0 count_0 = extracted_mat.count("0") count_1 = extracted_mat.count("1") total_count = count_0 + count_1 print("matrix count of -1, 0, 1: ", total_count) # if count euals the number of plants squared, then we have a valid matrix print("plant_list_len: ", len(plant_list) ** 2) if total_count == (len(plant_list)) ** 2: # if count == eval(extracted_mat): print("success") return eval(extracted_mat) break else: print("Error with parsing plant compatibility matrix") # try again up to 5 times for i in range(5): print( "Error with parsing plant compatibility matrix. 
Trying for attempt #" + str(i + 1) ) extracted_mat = chat_response( template + "remember, it MUST ONLY CONTAIN -1s, 0s, and 1s, like this structure: [[0,1],[1,0]]", text, model, demo_lite, ) # Extract the text within the brackets extracted_mat = ( "[" + compatibility_mat.content[ opening_bracket_index + 1 : closing_bracket_index ] + "]]" ) print(extracted_mat) total_count = 0 count_0 = extracted_mat.count("0") count_1 = extracted_mat.count("1") total_count = count_0 + count_1 print("matrix count of -1, 0, 1: ", total_count) # if count euals the number of plants squared, then we have a valid matrix print("plant_list_len: ", len(plant_list) ** 2) if total_count == (len(plant_list)) ** 2: # if count == eval(extracted_mat): print("success") return eval(extracted_mat) break return None # get compatability matrix for companion planting via subsetting a hardcoded matrix # make plant_compatibility.csv into a matrix. it currently has indexes as rows and columns for plant names and then compatibility values as the values plant_compatibility = pd.read_csv("src/data/plant_compatibility.csv", index_col=0) def get_compatibility_matrix_2(plant_list): # Subset the matrix to only include the plants in the user's list plant_compatibility = st.session_state.raw_plant_compatibility.loc[ plant_list, plant_list ] # full matrix full_mat = st.session_state.raw_plant_compatibility.to_numpy() # Convert the DataFrame to a NumPy array plant_compatibility_matrix = plant_compatibility.to_numpy() # Get the list of original indices (from the DataFrame) original_indices = plant_compatibility.index.tolist() # Create a dictionary to map plant names to their original indices plant_index_mapping = {plant: index for index, plant in enumerate(original_indices)} # Return the matrix and the plant-index mapping return plant_compatibility_matrix, full_mat, plant_index_mapping # get plant groupings from LLM def get_seed_groupings_from_LLM(model, demo_lite): plant_groupings_evaluated = "no response yet" if demo_lite: # just return "no response yet" for now return plant_groupings_evaluated template = "You are a helpful assistant that only outputs python lists of lists of lists of plants." # make sure output is strictly and only a list of lists for one grouping text = ( """I am working on a gardening project and need to optimally group a set of plants based on their compatibility. Below is the compatibility matrix for the plants, where each value represents how well two plants grow together (positive values indicate good compatibility, negative values indicate poor compatibility). I also have specific constraints for planting: there are a certain number of plant beds (n_plant_beds), each bed can have a minimum of min_species species and a maximum of max_species species. Given these constraints, please suggest several groupings of these plants into n_plant_beds beds, optimizing for overall compatibility. Number of Plant Beds: """ + str(st.session_state.n_plant_beds) + """ Minimum Species per Bed: """ + str(st.session_state.min_species) + """ Maximum Species per Bed: """ + str(st.session_state.max_species) + """ Plants and Compatibility Matrix:""" + str( st.session_state.raw_plant_compatibility.loc[ st.session_state.input_plants_raw, st.session_state.input_plants_raw ] ) + """ Please provide a grouping that maximize positive interactions within each bed and minimize negative interactions, adhering to the specified bed constraints. Return a list of lists where each list represents an iteration of plant groupings. 
Each list within the list represents a bed, and each list within the bed represents the plants in that bed. sample output: [['plant1', 'plant2'] #bed1, ['plant3', 'plant4'] #bed2, ['plant1', 'plant3'] #bed3] another sample output: [['plant1', 'plant2', 'plant3'] #bed1, ['plant4', 'plant5', 'plant6'] #bed2, ['plant7', 'plant8', 'plant9'] #bed3] Note: the number of beds, the number of plants per bed, and the number of plants in the list may vary. Note: only output ONE python list of lists of plants. Do not include any other text or explanation. """ ) plant_groupings = chat_response(template, text, model, demo_lite) # check to see if we've cut off the response due to time limit. if so, return "no response yet" for now if plant_groupings == None: return "no response yet" print("response about LLMs choice on groupings", plant_groupings) # try to eval the string to a list of lists try: plant_groupings_evaluated = eval(plant_groupings) # check type of output print(type(plant_groupings_evaluated)) # we expect a list of lists except: print("Error with parsing plant groupings") # try again up to 5 times for i in range(5): print( "Error with parsing plant groupings. Trying for attempt #" + str(i + 1) ) plant_groupings = chat_response(template, text, model, demo_lite) print(plant_groupings) # try to eval the string to a list of lists try: # make sure plant1 is not in the output if "plant1" in plant_groupings.lower(): print("plant1 is in the output") continue else: plant_groupings_evaluated = eval(plant_groupings) print("successful eval; output: ", plant_groupings_evaluated) break except: # try to find the list of lists within the string opening_bracket_index = plant_groupings.find("[[") closing_bracket_index = plant_groupings.find("]]") if opening_bracket_index != -1 and closing_bracket_index != -1: # Extract the text within the brackets extracted_list = ( "[" + plant_groupings[ opening_bracket_index + 1 : closing_bracket_index ] + "]]" ) # Return the evaluated text list if "plant1" in extracted_list.lower(): print("plant1 is in the output") continue else: plant_groupings_evaluated = eval(extracted_list) print("successful eval; output: ", plant_groupings_evaluated) break else: print("Error with parsing plant groupings") continue return plant_groupings_evaluated
[ "llama_index.llms.LlamaCPP" ]
[((13454, 13514), 'pandas.read_csv', 'pd.read_csv', (['"""src/data/plant_compatibility.csv"""'], {'index_col': '(0)'}), "('src/data/plant_compatibility.csv', index_col=0)\n", (13465, 13514), True, 'import pandas as pd\n'), ((13774, 13825), 'streamlit.session_state.raw_plant_compatibility.to_numpy', 'st.session_state.raw_plant_compatibility.to_numpy', ([], {}), '()\n', (13823, 13825), True, 'import streamlit as st\n'), ((4124, 4151), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.1)'}), '(temperature=0.1)\n', (4134, 4151), False, 'from langchain.chat_models import ChatOpenAI\n'), ((4184, 4235), 'langchain.prompts.chat.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['template'], {}), '(template)\n', (4225, 4235), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((4301, 4357), 'langchain.prompts.chat.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['human_template'], {}), '(human_template)\n', (4341, 4357), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((4380, 4459), 'langchain.prompts.chat.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system_message_prompt, human_message_prompt]'], {}), '([system_message_prompt, human_message_prompt])\n', (4412, 4459), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((7159, 7184), 'streamlit.write', 'st.write', (['plant_care_tips'], {}), '(plant_care_tips)\n', (7167, 7184), True, 'import streamlit as st\n'), ((1088, 1343), 'llama_index.llms.LlamaCPP', 'LlamaCPP', ([], {'model_path': 'model_path', 'temperature': '(0.1)', 'max_new_tokens': '(1000)', 'context_window': '(3000)', 'generate_kwargs': '{}', 'model_kwargs': "{'n_gpu_layers': 10}", 'messages_to_prompt': 'messages_to_prompt', 'completion_to_prompt': 'completion_to_prompt', 'verbose': '(True)'}), "(model_path=model_path, temperature=0.1, max_new_tokens=1000,\n context_window=3000, generate_kwargs={}, model_kwargs={'n_gpu_layers': \n 10}, messages_to_prompt=messages_to_prompt, completion_to_prompt=\n completion_to_prompt, verbose=True)\n", (1096, 1343), False, 'from llama_index.llms import LlamaCPP\n'), ((4867, 4920), 'streamlit.session_state.llm.complete', 'st.session_state.llm.complete', (['(template + prompt_text)'], {}), '(template + prompt_text)\n', (4896, 4920), True, 'import streamlit as st\n'), ((2272, 2439), 'llama_index.llms.LlamaCPP', 'LlamaCPP', ([], {'model_path': 'model_path', 'temperature': '(0.1)', 'max_new_tokens': '(1000)', 'context_window': '(3000)', 'generate_kwargs': '{}', 'model_kwargs': "{'n_gpu_layers': 1}", 'verbose': '(True)'}), "(model_path=model_path, temperature=0.1, max_new_tokens=1000,\n context_window=3000, generate_kwargs={}, model_kwargs={'n_gpu_layers': \n 1}, verbose=True)\n", (2280, 2439), False, 'from llama_index.llms import LlamaCPP\n')]
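A small, self-contained check of the `parse_and_evaluate_text` helper defined above; the model output string is invented, and importing the module is assumed to succeed (it reads src/data/plant_compatibility.csv at import time).

fake_llm_output = "Sure! Here is the list: ['tomato', 'basil', 'marigold'] Happy planting!"
print(parse_and_evaluate_text(fake_llm_output))  # -> ['tomato', 'basil', 'marigold']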
import requests
from bs4 import BeautifulSoup
from typing import Tuple, Dict, Any
from llama_index import Document


def page_ingest(url) -> Document:
    print("url", url)
    label = ''
    # Fetch the content from url
    response = requests.get(url)
    # Create a BeautifulSoup object and specify the parser
    soup = BeautifulSoup(response.text, 'html.parser')
    # Initialize an empty string to hold text
    text = ''
    # Initialize an empty dictionary to hold code
    code_blocks = {}
    # Extract all text not contained in a script or style element
    text_elements = soup.findAll(text=True)
    for element in text_elements:
        if element.parent.name not in ['script', 'style', 'a']:
            text += element.strip()
    print(len(text), url)
    document = Document(text=text, extra_info={'source': url})
    print(document)
    return document


def ingest_main(list_urls):
    list_of_docs = []
    for url in list_urls:
        page = page_ingest(url)
        list_of_docs.append(page)
    return list_of_docs


__all__ = ['ingest_main']
[ "llama_index.Document" ]
[((256, 273), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (268, 273), False, 'import requests\n'), ((344, 387), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (357, 387), False, 'from bs4 import BeautifulSoup\n'), ((807, 854), 'llama_index.Document', 'Document', ([], {'text': 'text', 'extra_info': "{'source': url}"}), "(text=text, extra_info={'source': url})\n", (815, 854), False, 'from llama_index import Document\n')]
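An illustrative call of the ingestion entry point above; the URLs are placeholders and network access is assumed.

docs = ingest_main(["https://example.com", "https://example.org"])  # placeholder URLs
print(f"ingested {len(docs)} documents")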
from pathlib import Path
from llama_index import GPTSimpleVectorIndex, download_loader
import sys


def load_document(file):
    RDFReader = download_loader("RDFReader")
    loader = RDFReader()
    return loader.load_data(file=Path(file))


def query(index, prompt):
    print("PROMPT:", prompt)
    result = index.query(prompt)
    print("RESPONSE:")
    print(result.response)


if __name__ == '__main__':
    RDF_FILE = 'docs.ttl'
    INDEX_FILE = 'docs.json'

    # live query - more expensive
    if sys.argv[1] == 'live':
        print("ENV: text-davinci")
        document = load_document(RDF_FILE)
        index = GPTSimpleVectorIndex(document)
        prompt = " ".join(sys.argv[2:])
        query(index, prompt)
    elif sys.argv[1] == 'save-index':
        print("Saving index to docs.json...")
        document = load_document(RDF_FILE)
        index = GPTSimpleVectorIndex(document)
        index.save_to_disk(INDEX_FILE)
    # query from ada embeddings - cheaper
    else:
        print("ENV: text-embedding-ada-002-v2")
        index = GPTSimpleVectorIndex.load_from_disk(INDEX_FILE)
        prompt = " ".join(sys.argv[1:])
        query(index, prompt)
[ "llama_index.GPTSimpleVectorIndex.load_from_disk", "llama_index.GPTSimpleVectorIndex", "llama_index.download_loader" ]
[((140, 168), 'llama_index.download_loader', 'download_loader', (['"""RDFReader"""'], {}), "('RDFReader')\n", (155, 168), False, 'from llama_index import GPTSimpleVectorIndex, download_loader\n'), ((620, 650), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['document'], {}), '(document)\n', (640, 650), False, 'from llama_index import GPTSimpleVectorIndex, download_loader\n'), ((227, 237), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (231, 237), False, 'from pathlib import Path\n'), ((864, 894), 'llama_index.GPTSimpleVectorIndex', 'GPTSimpleVectorIndex', (['document'], {}), '(document)\n', (884, 894), False, 'from llama_index import GPTSimpleVectorIndex, download_loader\n'), ((1050, 1097), 'llama_index.GPTSimpleVectorIndex.load_from_disk', 'GPTSimpleVectorIndex.load_from_disk', (['INDEX_FILE'], {}), '(INDEX_FILE)\n', (1085, 1097), False, 'from llama_index import GPTSimpleVectorIndex, download_loader\n')]
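Illustrative command lines for the CLI above; the file name rdf_query.py is a stand-in for whatever the script is saved as.

# python rdf_query.py save-index                      # build and save docs.json from docs.ttl
# python rdf_query.py live what classes are defined   # query with a freshly built index
# python rdf_query.py what classes are defined        # query the saved index (ada embeddings)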
from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage
from langchain.chat_models import ChatOpenAI
import gradio as gr


class ChatbotIndex:
    def __init__(self, model_name, directory_path):
        self.llm_predictor = LLMPredictor(ChatOpenAI(model_name=model_name))
        self.service_context = ServiceContext.from_defaults(
            llm_predictor=self.llm_predictor)
        self.docs = SimpleDirectoryReader(directory_path).load_data()

    def construct_index(self):
        self.index = GPTVectorStoreIndex.from_documents(
            self.docs, service_context=self.service_context)
        self.index.storage_context.persist(persist_dir='index')
        return self.index

    def load_index(self):
        storage_context = StorageContext.from_defaults(persist_dir="index")
        self.index = load_index_from_storage(storage_context)

    def query_response(self, input_text):
        query_engine = self.index.as_query_engine()
        response = query_engine.query(input_text)
        print(response)
        return response.response


def launch_chatbot_interface():
    chatbot = ChatbotIndex(model_name='gpt-3.5-turbo', directory_path="data")
    chatbot.construct_index()
    iface = gr.Interface(fn=chatbot.query_response, inputs="text",
                 outputs="text", title="LocalGPT Chatbot")
    iface.launch(share=True)


if __name__ == "__main__":
    launch_chatbot_interface()
[ "llama_index.GPTVectorStoreIndex.from_documents", "llama_index.ServiceContext.from_defaults", "llama_index.StorageContext.from_defaults", "llama_index.SimpleDirectoryReader", "llama_index.load_index_from_storage" ]
[((1293, 1393), 'gradio.Interface', 'gr.Interface', ([], {'fn': 'chatbot.query_response', 'inputs': '"""text"""', 'outputs': '"""text"""', 'title': '"""LocalGPT Chatbot"""'}), "(fn=chatbot.query_response, inputs='text', outputs='text',\n title='LocalGPT Chatbot')\n", (1305, 1393), True, 'import gradio as gr\n'), ((385, 447), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'self.llm_predictor'}), '(llm_predictor=self.llm_predictor)\n', (413, 447), False, 'from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage\n'), ((584, 672), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['self.docs'], {'service_context': 'self.service_context'}), '(self.docs, service_context=self.\n service_context)\n', (618, 672), False, 'from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage\n'), ((824, 873), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""index"""'}), "(persist_dir='index')\n", (852, 873), False, 'from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage\n'), ((895, 935), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (918, 935), False, 'from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage\n'), ((319, 352), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (329, 352), False, 'from langchain.chat_models import ChatOpenAI\n'), ((481, 518), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['directory_path'], {}), '(directory_path)\n', (502, 518), False, 'from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, LLMPredictor, ServiceContext, StorageContext, load_index_from_storage\n')]
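A hypothetical follow-up sketch (separate from the script above) showing how a later session could reuse the persisted index instead of re-embedding; the directory names and question are placeholders.

bot = ChatbotIndex(model_name="gpt-3.5-turbo", directory_path="data")  # "data" must exist
bot.load_index()  # reads the ./index directory persisted earlier by construct_index()
print(bot.query_response("What topics do these documents cover?"))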
# Constants
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank
from llama_index.prompts import ChatPromptTemplate
from llama_index.llms import OpenAI, ChatMessage, MessageRole
from llama_index import Document, ServiceContext, VectorStoreIndex
from llama_index.node_parser import SentenceWindowNodeParser
import os

os.environ["TOKENIZERS_PARALLELISM"] = "false"

MODEL = "gpt-4-1106-preview"
TEMPERATURE = 0.1
EMBED_MODEL = "local:BAAI/bge-small-zh-v1.5"
WINDOW_SIZE = 10
FILE_PATH = "./data/chat.txt"
RERANK_MODEL = "BAAI/bge-reranker-base"
TOP_N = 5
SIMILARITY_TOP_K = 10


class Chatbot:
    def __init__(self):
        self.node_parser = self.initialize_node_parser()
        self.llm = OpenAI(model=MODEL, temperature=TEMPERATURE)
        self.service_context = ServiceContext.from_defaults(
            llm=self.llm,
            embed_model=EMBED_MODEL,
            node_parser=self.node_parser,
        )
        self.engine = self.initialize_engine()

    @staticmethod
    def initialize_node_parser():
        def split(text):
            return text.split('\n')

        return SentenceWindowNodeParser.from_defaults(
            window_size=WINDOW_SIZE,
            window_metadata_key="window",
            original_text_metadata_key="original_text",
            sentence_splitter=split,
        )

    def initialize_engine(self):
        text = self.read_text(FILE_PATH)
        sentence_index = VectorStoreIndex.from_documents(
            [Document(text=text)], service_context=self.service_context
        )
        postproc = MetadataReplacementPostProcessor(
            target_metadata_key="window")
        rerank = SentenceTransformerRerank(top_n=TOP_N, model=RERANK_MODEL)

        wechat_bot_msgs = [
            ChatMessage(
                role=MessageRole.SYSTEM,
                content=(
                    "现在你将扮演我的克隆聊天机器人和朋友对话,请使用我的微信历史聊天记录作为参考,模仿这种特定的聊天风格和语气,以及句子回答的长度。注意使用类似的词汇、语句结构和表达方式、emoji的实用习惯。由于这个是微信聊天,请你说话不要太长太啰嗦。目标是使对话感觉自然、连贯,让他以为是在和我本人对话。"
                ),
            ),
            ChatMessage(
                role=MessageRole.USER,
                content=(
                    "我的相关微信历史聊天记录如下\n"
                    "---------------------\n"
                    "{context_str}\n"
                    "---------------------\n"
                    "请你模仿以上的聊天风格,完成以下对话,你的回答只包含回复\n"
                    "{query_str}\n"
                    "我的回复:"
                )
            ),
        ]
        wechat_bot_template = ChatPromptTemplate(wechat_bot_msgs)

        engine = sentence_index.as_query_engine(
            similarity_top_k=SIMILARITY_TOP_K,
            node_postprocessors=[postproc, rerank]
        )
        engine.update_prompts(
            {"response_synthesizer:text_qa_template": wechat_bot_template})
        return engine

    @staticmethod
    def read_text(file_path):
        with open(file_path) as f:
            return f.read()

    def chat(self, input_text):
        query = f"{input_text}"
        response = self.engine.query(query)
        return response.response


def main():
    bot = Chatbot()
    print("Chatbot initialized. Start chatting!")
    history = "朋友:"
    while True:
        user_input = input("You: ")
        if user_input.lower() in ['exit', 'quit']:
            break
        response = bot.chat(history + user_input)
        history += user_input + "\n" + "我:" + response + "\n" + "朋友:"
        print("Bot:", response)


if __name__ == "__main__":
    main()
[ "llama_index.ServiceContext.from_defaults", "llama_index.node_parser.SentenceWindowNodeParser.from_defaults", "llama_index.llms.OpenAI", "llama_index.llms.ChatMessage", "llama_index.prompts.ChatPromptTemplate", "llama_index.indices.postprocessor.MetadataReplacementPostProcessor", "llama_index.indices.postprocessor.SentenceTransformerRerank", "llama_index.Document" ]
[((744, 788), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': 'MODEL', 'temperature': 'TEMPERATURE'}), '(model=MODEL, temperature=TEMPERATURE)\n', (750, 788), False, 'from llama_index.llms import OpenAI, ChatMessage, MessageRole\n'), ((820, 921), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'self.llm', 'embed_model': 'EMBED_MODEL', 'node_parser': 'self.node_parser'}), '(llm=self.llm, embed_model=EMBED_MODEL,\n node_parser=self.node_parser)\n', (848, 921), False, 'from llama_index import Document, ServiceContext, VectorStoreIndex\n'), ((1142, 1313), 'llama_index.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': 'WINDOW_SIZE', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""', 'sentence_splitter': 'split'}), "(window_size=WINDOW_SIZE,\n window_metadata_key='window', original_text_metadata_key=\n 'original_text', sentence_splitter=split)\n", (1180, 1313), False, 'from llama_index.node_parser import SentenceWindowNodeParser\n'), ((1599, 1661), 'llama_index.indices.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (1631, 1661), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank\n'), ((1692, 1750), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': 'TOP_N', 'model': 'RERANK_MODEL'}), '(top_n=TOP_N, model=RERANK_MODEL)\n', (1717, 1750), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank\n'), ((2522, 2557), 'llama_index.prompts.ChatPromptTemplate', 'ChatPromptTemplate', (['wechat_bot_msgs'], {}), '(wechat_bot_msgs)\n', (2540, 2557), False, 'from llama_index.prompts import ChatPromptTemplate\n'), ((1792, 1994), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.SYSTEM', 'content': '"""现在你将扮演我的克隆聊天机器人和朋友对话,请使用我的微信历史聊天记录作为参考,模仿这种特定的聊天风格和语气,以及句子回答的长度。注意使用类似的词汇、语句结构和表达方式、emoji的实用习惯。由于这个是微信聊天,请你说话不要太长太啰嗦。目标是使对话感觉自然、连贯,让他以为是在和我本人对话。"""'}), "(role=MessageRole.SYSTEM, content=\n '现在你将扮演我的克隆聊天机器人和朋友对话,请使用我的微信历史聊天记录作为参考,模仿这种特定的聊天风格和语气,以及句子回答的长度。注意使用类似的词汇、语句结构和表达方式、emoji的实用习惯。由于这个是微信聊天,请你说话不要太长太啰嗦。目标是使对话感觉自然、连贯,让他以为是在和我本人对话。'\n )\n", (1803, 1994), False, 'from llama_index.llms import OpenAI, ChatMessage, MessageRole\n'), ((2085, 2264), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': '"""我的相关微信历史聊天记录如下\n---------------------\n{context_str}\n---------------------\n请你模仿以上的聊天风格,完成以下对话,你的回答只包含回复\n{query_str}\n我的回复:"""'}), '(role=MessageRole.USER, content=\n """我的相关微信历史聊天记录如下\n---------------------\n{context_str}\n---------------------\n请你模仿以上的聊天风格,完成以下对话,你的回答只包含回复\n{query_str}\n我的回复:"""\n )\n', (2096, 2264), False, 'from llama_index.llms import OpenAI, ChatMessage, MessageRole\n'), ((1510, 1529), 'llama_index.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (1518, 1529), False, 'from llama_index import Document, ServiceContext, VectorStoreIndex\n')]
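A one-shot, non-interactive use of the Chatbot class above, sketched under the assumption that ./data/chat.txt exists and an OpenAI key is configured; the incoming message is invented.

bot = Chatbot()                                  # builds the index from ./data/chat.txt
print(bot.chat("朋友:周末有空一起吃饭吗?\n我:"))  # single reply, no interactive loop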
import os
import uvicorn
import asyncio
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from llama_index import load_index_from_storage, StorageContext, ServiceContext, LLMPredictor
from fastapi.middleware.cors import CORSMiddleware
from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, streaming=True)
llm_predictor = LLMPredictor(llm=llm)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)

app = FastAPI()

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class QueryModel(BaseModel):
    index: str
    query: str


async def astreamer(generator):
    try:
        for i in generator:
            yield (i)
            await asyncio.sleep(.1)
    except asyncio.CancelledError as e:
        print('cancelled')


@app.post("/generate")
async def query_index(query_model: QueryModel):
    storage_context = StorageContext.from_defaults(
        persist_dir=os.path.dirname(__file__) + '/3-index/' + query_model.index)
    index = load_index_from_storage(storage_context, service_context=service_context)
    query_engine = index.as_query_engine(streaming=True, similarity_top_k=1)
    response = query_engine.query(query_model.query)
    return StreamingResponse(astreamer(response.response_gen), media_type="text/event-stream")


def start():
    uvicorn.run("server:app", host="0.0.0.0", port=8000)


if __name__ == "__main__":
    start()
[ "llama_index.ServiceContext.from_defaults", "llama_index.load_index_from_storage", "llama_index.LLMPredictor" ]
[((360, 429), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)', 'streaming': '(True)'}), "(model_name='gpt-3.5-turbo', temperature=0, streaming=True)\n", (370, 429), False, 'from langchain.chat_models import ChatOpenAI\n'), ((446, 467), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (458, 467), False, 'from llama_index import load_index_from_storage, StorageContext, ServiceContext, LLMPredictor, StorageContext\n'), ((486, 543), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (514, 543), False, 'from llama_index import load_index_from_storage, StorageContext, ServiceContext, LLMPredictor, StorageContext\n'), ((551, 560), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (558, 560), False, 'from fastapi import FastAPI\n'), ((1170, 1243), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (1193, 1243), False, 'from llama_index import load_index_from_storage, StorageContext, ServiceContext, LLMPredictor, StorageContext\n'), ((1489, 1541), 'uvicorn.run', 'uvicorn.run', (['"""server:app"""'], {'host': '"""0.0.0.0"""', 'port': '(8000)'}), "('server:app', host='0.0.0.0', port=8000)\n", (1500, 1541), False, 'import uvicorn\n'), ((878, 896), 'asyncio.sleep', 'asyncio.sleep', (['(0.1)'], {}), '(0.1)\n', (891, 896), False, 'import asyncio\n'), ((1099, 1124), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1114, 1124), False, 'import os\n')]
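A hypothetical client call against the service above, assuming it is running locally on port 8000 and that ./3-index/<name> holds a persisted index; the index name and question are placeholders.

import requests

resp = requests.post(
    "http://localhost:8000/generate",
    json={"index": "my-index", "query": "What is this corpus about?"},
    stream=True,
)
for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
    print(chunk, end="", flush=True)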
"""Handles chat interactions for WandBot. This module contains the Chat class which is responsible for handling chat interactions. It includes methods for initializing the chat, loading the storage context from an artifact, loading the chat engine, validating and formatting questions, formatting responses, and getting answers. It also contains a function for generating a list of chat messages from a given chat history. Typical usage example: config = ChatConfig() chat = Chat(config=config) chat_history = [] while True: question = input("You: ") if question.lower() == "quit": break else: response = chat( ChatRequest(question=question, chat_history=chat_history) ) chat_history.append( QuestionAnswer(question=question, answer=response.answer) ) print(f"WandBot: {response.answer}") print(f"Time taken: {response.time_taken}") """ import json from typing import Any, Dict, List, Optional, Tuple from llama_index import ServiceContext from llama_index.callbacks import ( CallbackManager, TokenCountingHandler, WandbCallbackHandler, trace_method, ) from llama_index.chat_engine import ContextChatEngine from llama_index.chat_engine.types import AgentChatResponse from llama_index.indices.postprocessor import CohereRerank from llama_index.llms import LLM, ChatMessage, MessageRole from llama_index.llms.generic_utils import messages_to_history_str from llama_index.memory import BaseMemory from llama_index.postprocessor.types import BaseNodePostprocessor from llama_index.schema import MetadataMode, NodeWithScore, QueryBundle from llama_index.tools import ToolOutput from weave.monitoring import StreamTable import wandb from wandbot.chat.config import ChatConfig from wandbot.chat.prompts import load_chat_prompt, partial_format from wandbot.chat.query_enhancer import CompleteQuery, QueryHandler from wandbot.chat.retriever import ( HybridRetriever, LanguageFilterPostprocessor, MetadataPostprocessor, Retriever, ) from wandbot.chat.schemas import ChatRequest, ChatResponse from wandbot.utils import Timer, get_logger, load_service_context logger = get_logger(__name__) def rebuild_full_prompt( message_templates: List[ChatMessage], result: Dict[str, Any] ) -> str: system_template = messages_to_history_str(message_templates[:-1]) query_str = result["question"] context = json.loads( result.get("source_documents", '[{"text": "", "source": ""}]') ) context_str = "" for idx, item in enumerate(context): context_str += f"source {idx+1}: " + item["source"] + "\n\n" context_str += "*" * 120 + "\n\n" context_str += item["text"] + "\n\n" context_str += "*" * 120 + "\n\n" context_str += "---\n\n" query_content = partial_format( message_templates[-1].content, query_str=query_str, context_str=context_str, ) system_template += ( f"\n\n{message_templates[-1].role}:\t{query_content}\n\n---\n\n" ) return system_template class WandbContextChatEngine(ContextChatEngine): def __init__( self, retriever: HybridRetriever, llm: LLM, memory: BaseMemory, prefix_messages: List[ChatMessage], node_postprocessors: Optional[List[BaseNodePostprocessor]] = None, context_template: Optional[str] = None, callback_manager: Optional[CallbackManager] = None, ) -> None: super().__init__( retriever=retriever, llm=llm, memory=memory, prefix_messages=prefix_messages, node_postprocessors=node_postprocessors, context_template=context_template, callback_manager=callback_manager, ) self._retriever: HybridRetriever = retriever def _generate_context( self, message: str, **kwargs ) -> Tuple[str, List[NodeWithScore]]: """Generate context information from a message.""" 
keywords = kwargs.get("keywords", []) sub_queries = kwargs.get("sub_queries", []) query_nodes = self._retriever.retrieve( message, is_avoid_query=kwargs.get("is_avoid_query") ) keywords_nodes = [] sub_query_nodes = [] if keywords: keywords_nodes = self._retriever.retrieve(" ".join(keywords)) if sub_queries: for sub_query in sub_queries: sub_query_nodes += self._retriever.retrieve(sub_query) nodes = query_nodes + keywords_nodes + sub_query_nodes for postprocessor in self._node_postprocessors: nodes = postprocessor.postprocess_nodes( nodes, query_bundle=QueryBundle(message) ) context_str = "\n\n---\n\n".join( [ n.node.get_content(metadata_mode=MetadataMode.LLM).strip() for n in nodes ] ) return context_str.strip(), nodes def _get_prefix_messages_with_context( self, context_str: str ) -> List[ChatMessage]: """Get the prefix messages with context.""" prefix_messages = self._prefix_messages context_str_w_sys_prompt = partial_format( prefix_messages[-1].content, context_str=context_str ) return [ *prefix_messages[:-1], ChatMessage( content=context_str_w_sys_prompt, role=MessageRole.USER, metadata={}, ), ] @trace_method("chat") def chat( self, message: str, chat_history: Optional[List[ChatMessage]] = None, **kwargs, ) -> AgentChatResponse: context_str_template, nodes = self._generate_context( message, keywords=kwargs.get("keywords", []), sub_queries=kwargs.get("sub_queries", []), is_avoid_query=kwargs.get("is_avoid_query"), ) prefix_messages = self._get_prefix_messages_with_context( context_str_template ) prefix_messages[-1] = ChatMessage( content=partial_format( prefix_messages[-1].content, query_str=message ), role="user", ) self._memory.put(prefix_messages[-1]) all_messages = prefix_messages chat_response = self._llm.chat(all_messages) ai_message = chat_response.message self._memory.put(ai_message) return AgentChatResponse( response=str(chat_response.message.content), sources=[ ToolOutput( tool_name="retriever", content=str(prefix_messages[0]), raw_input={"message": message}, raw_output=prefix_messages[0], ) ], source_nodes=nodes, ) class Chat: """Class for handling chat interactions. Attributes: config: An instance of ChatConfig containing configuration settings. run: An instance of wandb.Run for logging experiment information. wandb_callback: An instance of WandbCallbackHandler for handling Wandb callbacks. token_counter: An instance of TokenCountingHandler for counting tokens. callback_manager: An instance of CallbackManager for managing callbacks. qa_prompt: A string representing the chat prompt. """ def __init__(self, config: ChatConfig): """Initializes the Chat instance. Args: config: An instance of ChatConfig containing configuration settings. 
""" self.config = config self.run = wandb.init( project=self.config.wandb_project, entity=self.config.wandb_entity, job_type="chat", ) self.run._label(repo="wandbot") self.chat_table = StreamTable( table_name="chat_logs", project_name=self.config.wandb_project, entity_name=self.config.wandb_entity, # f"{self.config.wandb_entity}/{self.config.wandb_project}/chat_logs" ) self.wandb_callback = WandbCallbackHandler() self.token_counter = TokenCountingHandler() self.callback_manager = CallbackManager( [self.wandb_callback, self.token_counter] ) self.default_service_context = load_service_context( llm=self.config.chat_model_name, temperature=self.config.chat_temperature, max_retries=self.config.max_retries, embeddings_cache=str(self.config.embeddings_cache), callback_manager=self.callback_manager, ) self.fallback_service_context = load_service_context( llm=self.config.fallback_model_name, temperature=self.config.chat_temperature, max_retries=self.config.max_fallback_retries, embeddings_cache=str(self.config.embeddings_cache), callback_manager=self.callback_manager, ) self.qa_prompt = load_chat_prompt(f_name=self.config.chat_prompt) self.query_handler = QueryHandler() self.retriever = Retriever( run=self.run, service_context=self.fallback_service_context, callback_manager=self.callback_manager, ) def _load_chat_engine( self, service_context: ServiceContext, query_intent: str = "\n", language: str = "en", initial_k: int = 15, top_k: int = 5, ) -> WandbContextChatEngine: """Loads the chat engine with the given model name and maximum retries. Args: service_context: An instance of ServiceContext. query_intent: A string representing the query intent. language: A string representing the language. initial_k: An integer representing the initial number of documents to retrieve. top_k: An integer representing the number of documents to retrieve after reranking. Returns: An instance of ChatEngine. """ query_engine = self.retriever.load_query_engine( language=language, top_k=top_k, is_avoid_query=True if "avoid" in query_intent.lower() else False, ) self.qa_prompt = load_chat_prompt( f_name=self.config.chat_prompt, language_code=language, query_intent=query_intent, ) chat_engine_kwargs = dict( retriever=query_engine.retriever, storage_context=self.retriever.storage_context, service_context=service_context, similarity_top_k=initial_k, response_mode="compact", node_postprocessors=[ MetadataPostprocessor(), LanguageFilterPostprocessor(languages=[language, "python"]), CohereRerank(top_n=top_k, model="rerank-english-v2.0") if language == "en" else CohereRerank( top_n=top_k, model="rerank-multilingual-v2.0" ), ], prefix_messages=self.qa_prompt.message_templates, ) chat_engine = WandbContextChatEngine.from_defaults(**chat_engine_kwargs) return chat_engine def format_response(self, result: Dict[str, Any]) -> Dict[str, Any]: """Formats the response dictionary. Args: result: A dictionary representing the response. Returns: A formatted response dictionary. 
""" response = {} if result.get("source_documents", None): source_documents = [ { "source": doc.metadata["source"], "text": doc.text, } for doc in result["source_documents"] ] else: source_documents = [] response["answer"] = result["answer"] response["model"] = result["model"] if len(source_documents) and self.config.include_sources: response["source_documents"] = json.dumps(source_documents) response["sources"] = ",".join( [doc["source"] for doc in source_documents] ) else: response["source_documents"] = "" response["sources"] = "" return response def get_response( self, service_context: ServiceContext, query: str, language: str, chat_history: List[ChatMessage], query_intent: str, keywords: List[str] | None = None, sub_queries: List[str] | None = None, ) -> Dict[str, Any]: chat_engine = self._load_chat_engine( service_context=service_context, language=language, query_intent=query_intent, ) response = chat_engine.chat( message=query, chat_history=chat_history, keywords=keywords, sub_queries=sub_queries, is_avoid_query=True if "avoid" in query_intent.lower() else False, ) result = { "answer": response.response, "source_documents": response.source_nodes, "model": self.config.chat_model_name, } return result def get_answer( self, resolved_query: CompleteQuery, **kwargs, ) -> Dict[str, Any]: """Gets the answer for the given query and chat history. Args: resolved_query: An instance of ResolvedQuery representing the resolved query. Returns: A dictionary representing the answer. """ try: result = self.get_response( service_context=self.default_service_context, query=resolved_query.condensed_query, language=resolved_query.language, chat_history=resolved_query.chat_history, query_intent=resolved_query.intent_hints, ) except Exception as e: logger.warning(f"{self.config.chat_model_name} failed with {e}") logger.warning( f"Falling back to {self.config.fallback_model_name} model" ) try: result = self.get_response( service_context=self.fallback_service_context, query=resolved_query.cleaned_query, language=resolved_query.language, chat_history=resolved_query.chat_history, query_intent=resolved_query.intent_hints, ) except Exception as e: logger.error( f"{self.config.fallback_model_name} failed with {e}" ) result = { "answer": "\uE058" + " Sorry, there seems to be an issue with our LLM service. Please try again in some time.", "source_documents": None, "model": "None", } return self.format_response(result) def __call__(self, chat_request: ChatRequest) -> ChatResponse: """Handles the chat request and returns the chat response. Args: chat_request: An instance of ChatRequest representing the chat request. Returns: An instance of `ChatResponse` representing the chat response. 
""" try: with Timer() as timer: result = {} resolved_query = self.query_handler(chat_request) result = self.get_answer(resolved_query) usage_stats = { "total_tokens": self.token_counter.total_llm_token_count, "prompt_tokens": self.token_counter.prompt_llm_token_count, "completion_tokens": self.token_counter.completion_llm_token_count, } self.token_counter.reset_counts() result.update( dict( **{ "question": chat_request.question, "time_taken": timer.elapsed, "start_time": timer.start, "end_time": timer.stop, "application": chat_request.application, }, **usage_stats, ) ) self.run.log(usage_stats) system_template = rebuild_full_prompt( self.qa_prompt.message_templates, result ) result["system_prompt"] = system_template self.chat_table.log(result) return ChatResponse(**result) except Exception as e: with Timer() as timer: result = { "system_prompt": "", "question": chat_request.question, "answer": str(e), "model": "", "sources": "", "source_documents": "", "total_tokens": 0, "prompt_tokens": 0, "completion_tokens": 0, } result.update( { "time_taken": timer.elapsed, "start_time": timer.start, "end_time": timer.stop, } ) usage_stats = {} return ChatResponse(**result)
[ "llama_index.callbacks.WandbCallbackHandler", "llama_index.callbacks.TokenCountingHandler", "llama_index.llms.generic_utils.messages_to_history_str", "llama_index.llms.ChatMessage", "llama_index.callbacks.CallbackManager", "llama_index.indices.postprocessor.CohereRerank", "llama_index.schema.QueryBundle", "llama_index.callbacks.trace_method" ]
[((2223, 2243), 'wandbot.utils.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (2233, 2243), False, 'from wandbot.utils import Timer, get_logger, load_service_context\n'), ((2368, 2415), 'llama_index.llms.generic_utils.messages_to_history_str', 'messages_to_history_str', (['message_templates[:-1]'], {}), '(message_templates[:-1])\n', (2391, 2415), False, 'from llama_index.llms.generic_utils import messages_to_history_str\n'), ((2871, 2966), 'wandbot.chat.prompts.partial_format', 'partial_format', (['message_templates[-1].content'], {'query_str': 'query_str', 'context_str': 'context_str'}), '(message_templates[-1].content, query_str=query_str,\n context_str=context_str)\n', (2885, 2966), False, 'from wandbot.chat.prompts import load_chat_prompt, partial_format\n'), ((5606, 5626), 'llama_index.callbacks.trace_method', 'trace_method', (['"""chat"""'], {}), "('chat')\n", (5618, 5626), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler, WandbCallbackHandler, trace_method\n'), ((5289, 5357), 'wandbot.chat.prompts.partial_format', 'partial_format', (['prefix_messages[-1].content'], {'context_str': 'context_str'}), '(prefix_messages[-1].content, context_str=context_str)\n', (5303, 5357), False, 'from wandbot.chat.prompts import load_chat_prompt, partial_format\n'), ((7759, 7859), 'wandb.init', 'wandb.init', ([], {'project': 'self.config.wandb_project', 'entity': 'self.config.wandb_entity', 'job_type': '"""chat"""'}), "(project=self.config.wandb_project, entity=self.config.\n wandb_entity, job_type='chat')\n", (7769, 7859), False, 'import wandb\n'), ((7968, 8085), 'weave.monitoring.StreamTable', 'StreamTable', ([], {'table_name': '"""chat_logs"""', 'project_name': 'self.config.wandb_project', 'entity_name': 'self.config.wandb_entity'}), "(table_name='chat_logs', project_name=self.config.wandb_project,\n entity_name=self.config.wandb_entity)\n", (7979, 8085), False, 'from weave.monitoring import StreamTable\n'), ((8242, 8264), 'llama_index.callbacks.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '()\n', (8262, 8264), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler, WandbCallbackHandler, trace_method\n'), ((8294, 8316), 'llama_index.callbacks.TokenCountingHandler', 'TokenCountingHandler', ([], {}), '()\n', (8314, 8316), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler, WandbCallbackHandler, trace_method\n'), ((8349, 8407), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[self.wandb_callback, self.token_counter]'], {}), '([self.wandb_callback, self.token_counter])\n', (8364, 8407), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler, WandbCallbackHandler, trace_method\n'), ((9140, 9188), 'wandbot.chat.prompts.load_chat_prompt', 'load_chat_prompt', ([], {'f_name': 'self.config.chat_prompt'}), '(f_name=self.config.chat_prompt)\n', (9156, 9188), False, 'from wandbot.chat.prompts import load_chat_prompt, partial_format\n'), ((9218, 9232), 'wandbot.chat.query_enhancer.QueryHandler', 'QueryHandler', ([], {}), '()\n', (9230, 9232), False, 'from wandbot.chat.query_enhancer import CompleteQuery, QueryHandler\n'), ((9258, 9372), 'wandbot.chat.retriever.Retriever', 'Retriever', ([], {'run': 'self.run', 'service_context': 'self.fallback_service_context', 'callback_manager': 'self.callback_manager'}), '(run=self.run, service_context=self.fallback_service_context,\n callback_manager=self.callback_manager)\n', (9267, 9372), False, 'from wandbot.chat.retriever 
import HybridRetriever, LanguageFilterPostprocessor, MetadataPostprocessor, Retriever\n'), ((10414, 10517), 'wandbot.chat.prompts.load_chat_prompt', 'load_chat_prompt', ([], {'f_name': 'self.config.chat_prompt', 'language_code': 'language', 'query_intent': 'query_intent'}), '(f_name=self.config.chat_prompt, language_code=language,\n query_intent=query_intent)\n', (10430, 10517), False, 'from wandbot.chat.prompts import load_chat_prompt, partial_format\n'), ((5444, 5529), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'content': 'context_str_w_sys_prompt', 'role': 'MessageRole.USER', 'metadata': '{}'}), '(content=context_str_w_sys_prompt, role=MessageRole.USER,\n metadata={})\n', (5455, 5529), False, 'from llama_index.llms import LLM, ChatMessage, MessageRole\n'), ((12216, 12244), 'json.dumps', 'json.dumps', (['source_documents'], {}), '(source_documents)\n', (12226, 12244), False, 'import json\n'), ((16815, 16837), 'wandbot.chat.schemas.ChatResponse', 'ChatResponse', ([], {}), '(**result)\n', (16827, 16837), False, 'from wandbot.chat.schemas import ChatRequest, ChatResponse\n'), ((6208, 6270), 'wandbot.chat.prompts.partial_format', 'partial_format', (['prefix_messages[-1].content'], {'query_str': 'message'}), '(prefix_messages[-1].content, query_str=message)\n', (6222, 6270), False, 'from wandbot.chat.prompts import load_chat_prompt, partial_format\n'), ((15587, 15594), 'wandbot.utils.Timer', 'Timer', ([], {}), '()\n', (15592, 15594), False, 'from wandbot.utils import Timer, get_logger, load_service_context\n'), ((17583, 17605), 'wandbot.chat.schemas.ChatResponse', 'ChatResponse', ([], {}), '(**result)\n', (17595, 17605), False, 'from wandbot.chat.schemas import ChatRequest, ChatResponse\n'), ((4785, 4805), 'llama_index.schema.QueryBundle', 'QueryBundle', (['message'], {}), '(message)\n', (4796, 4805), False, 'from llama_index.schema import MetadataMode, NodeWithScore, QueryBundle\n'), ((10874, 10897), 'wandbot.chat.retriever.MetadataPostprocessor', 'MetadataPostprocessor', ([], {}), '()\n', (10895, 10897), False, 'from wandbot.chat.retriever import HybridRetriever, LanguageFilterPostprocessor, MetadataPostprocessor, Retriever\n'), ((10915, 10974), 'wandbot.chat.retriever.LanguageFilterPostprocessor', 'LanguageFilterPostprocessor', ([], {'languages': "[language, 'python']"}), "(languages=[language, 'python'])\n", (10942, 10974), False, 'from wandbot.chat.retriever import HybridRetriever, LanguageFilterPostprocessor, MetadataPostprocessor, Retriever\n'), ((16886, 16893), 'wandbot.utils.Timer', 'Timer', ([], {}), '()\n', (16891, 16893), False, 'from wandbot.utils import Timer, get_logger, load_service_context\n'), ((10992, 11046), 'llama_index.indices.postprocessor.CohereRerank', 'CohereRerank', ([], {'top_n': 'top_k', 'model': '"""rerank-english-v2.0"""'}), "(top_n=top_k, model='rerank-english-v2.0')\n", (11004, 11046), False, 'from llama_index.indices.postprocessor import CohereRerank\n'), ((11104, 11163), 'llama_index.indices.postprocessor.CohereRerank', 'CohereRerank', ([], {'top_n': 'top_k', 'model': '"""rerank-multilingual-v2.0"""'}), "(top_n=top_k, model='rerank-multilingual-v2.0')\n", (11116, 11163), False, 'from llama_index.indices.postprocessor import CohereRerank\n')]
import streamlit as st
from llama_index import VectorStoreIndex
from llama_index.vector_stores import ChromaVectorStore
import chromadb

st.title('Precident')

# load and prime the index
db2 = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db2.get_or_create_collection("quickstart")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
index = VectorStoreIndex.from_vector_store(
    vector_store,
)
query_engine = index.as_query_engine()

# add search bar
search = st.text_input('Search', 'Enter query here')
response = query_engine.query(search)
st.write(response)
[ "llama_index.VectorStoreIndex.from_vector_store", "llama_index.vector_stores.ChromaVectorStore" ]
[((137, 158), 'streamlit.title', 'st.title', (['"""Precident"""'], {}), "('Precident')\n", (145, 158), True, 'import streamlit as st\n'), ((193, 238), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""./chroma_db"""'}), "(path='./chroma_db')\n", (218, 238), False, 'import chromadb\n'), ((317, 371), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (334, 371), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((380, 428), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {}), '(vector_store)\n', (414, 428), False, 'from llama_index import VectorStoreIndex\n'), ((502, 545), 'streamlit.text_input', 'st.text_input', (['"""Search"""', '"""Enter query here"""'], {}), "('Search', 'Enter query here')\n", (515, 545), True, 'import streamlit as st\n'), ((584, 602), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (592, 602), True, 'import streamlit as st\n')]
import os
import time
from llama_index import ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage, set_global_service_context
import llama_index
from Models import Models
from DocumentClass import DocumentClass


class MediawikiLLM:
    service_context = None
    mediawiki_url = None
    api_url = None

    DocumentClass = None
    index = None
    index_filename = None
    query_engine = None

    def __init__(self, mediawiki_url, api_url):
        self.mediawiki_url = mediawiki_url
        self.DocumentClass = DocumentClass(api_url)

        llm = Models.CreateLlamaCCP(
            model_url=os.getenv("MODEL_URL"), model_path=os.getenv("MODEL_PATH"))
        # llm = Models.CreateHuggingFaceLLM(model_name="Writer/camel-5b-hf")

        self.service_context = ServiceContext.from_defaults(
            llm=llm,
            embed_model="local",
            chunk_size=1024,
        )

    def init_from_mediawiki(self):
        set_global_service_context(self.service_context)

        if os.path.isdir(str(os.getenv("PERSISTENT_STORAGE_DIR"))):
            storage_context = StorageContext.from_defaults(
                persist_dir=os.getenv("PERSISTENT_STORAGE_DIR"))
            self.index = load_index_from_storage(storage_context)
        else:
            self.DocumentClass.mediawiki_get_all_pages(self.mediawiki_url)
            self.index = VectorStoreIndex.from_documents(
                self.DocumentClass.documents, service_context=self.service_context)

            if os.getenv("PERSISTENT_STORAGE_DIR") is not None:
                self.index.storage_context.persist(
                    os.getenv("PERSISTENT_STORAGE_DIR"))

        self.query_engine = self.index.as_query_engine()

    def init_no_documents(self):
        self.index = llama_index.indices.empty.EmptyIndex(
            service_context=self.service_context)

        self.query_engine = self.index.as_query_engine()

    def updateVectorStore(self, type: str, page_url: str):
        if type == 'edit' or type == 'create':
            print("create/edit " + page_url)
            self.DocumentClass.mediawiki_update_page(page_url)
        elif type == 'delete':
            print("delete " + page_url)
            self.DocumentClass.mediawiki_delete_page(page_url)

        self.index.refresh(self.DocumentClass.documents)
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.ServiceContext.from_defaults", "llama_index.load_index_from_storage", "llama_index.set_global_service_context", "llama_index.indices.empty.EmptyIndex" ]
[((542, 564), 'DocumentClass.DocumentClass', 'DocumentClass', (['api_url'], {}), '(api_url)\n', (555, 564), False, 'from DocumentClass import DocumentClass\n'), ((795, 870), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""', 'chunk_size': '(1024)'}), "(llm=llm, embed_model='local', chunk_size=1024)\n", (823, 870), False, 'from llama_index import ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage, set_global_service_context\n'), ((962, 1010), 'llama_index.set_global_service_context', 'set_global_service_context', (['self.service_context'], {}), '(self.service_context)\n', (988, 1010), False, 'from llama_index import ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage, set_global_service_context\n'), ((1789, 1863), 'llama_index.indices.empty.EmptyIndex', 'llama_index.indices.empty.EmptyIndex', ([], {'service_context': 'self.service_context'}), '(service_context=self.service_context)\n', (1825, 1863), False, 'import llama_index\n'), ((1230, 1270), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1253, 1270), False, 'from llama_index import ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage, set_global_service_context\n'), ((1386, 1489), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['self.DocumentClass.documents'], {'service_context': 'self.service_context'}), '(self.DocumentClass.documents,\n service_context=self.service_context)\n', (1417, 1489), False, 'from llama_index import ServiceContext, StorageContext, VectorStoreIndex, load_index_from_storage, set_global_service_context\n'), ((625, 647), 'os.getenv', 'os.getenv', (['"""MODEL_URL"""'], {}), "('MODEL_URL')\n", (634, 647), False, 'import os\n'), ((660, 683), 'os.getenv', 'os.getenv', (['"""MODEL_PATH"""'], {}), "('MODEL_PATH')\n", (669, 683), False, 'import os\n'), ((1041, 1076), 'os.getenv', 'os.getenv', (['"""PERSISTENT_STORAGE_DIR"""'], {}), "('PERSISTENT_STORAGE_DIR')\n", (1050, 1076), False, 'import os\n'), ((1518, 1553), 'os.getenv', 'os.getenv', (['"""PERSISTENT_STORAGE_DIR"""'], {}), "('PERSISTENT_STORAGE_DIR')\n", (1527, 1553), False, 'import os\n'), ((1168, 1203), 'os.getenv', 'os.getenv', (['"""PERSISTENT_STORAGE_DIR"""'], {}), "('PERSISTENT_STORAGE_DIR')\n", (1177, 1203), False, 'import os\n'), ((1639, 1674), 'os.getenv', 'os.getenv', (['"""PERSISTENT_STORAGE_DIR"""'], {}), "('PERSISTENT_STORAGE_DIR')\n", (1648, 1674), False, 'import os\n')]
import os import shutil import chromadb import redis from llama_index.core.indices import VectorStoreIndex from llama_index.core.storage import StorageContext from app.tools import FindEmbeddingsPath from llama_index.vector_stores.redis import RedisVectorStore from llama_index.vector_stores.chroma import ChromaVectorStore def vector_init(brain, project): path = FindEmbeddingsPath(project.model.name) if project.model.vectorstore == "chroma": db = chromadb.PersistentClient(path=path) chroma_collection = db.get_or_create_collection(project.model.name) vector_store = ChromaVectorStore(chroma_collection=chroma_collection) storage_context = StorageContext.from_defaults( vector_store=vector_store) index = VectorStoreIndex.from_vector_store( vector_store, storage_context=storage_context, embed_model=brain.getEmbedding( project.model.embeddings)) return index elif project.model.vectorstore == "redis": if path is None or len(os.listdir(path)) == 0: vector_store = RedisVectorStore( redis_url="redis://" + os.environ["REDIS_HOST"] + ":" + os.environ["REDIS_PORT"], index_name=project.model.name, metadata_fields=["source", "keywords"], index_prefix="llama_" + project.model.name, overwrite=False) storage_context = StorageContext.from_defaults( vector_store=vector_store) index = VectorStoreIndex.from_vector_store(vector_store, storage_context=storage_context, embed_model=brain.getEmbedding(project.model.embeddings)) return index else: return vector_load(brain, project) def vector_save(project): if project.model.vectorstore == "chroma": pass elif project.model.vectorstore == "redis": try: project.db.vector_store.persist(persist_path="") except BaseException: print("REDIS - Error saving vectors") def vector_load(brain, project): if project.model.vectorstore == "chroma": return vector_init(brain, project) if project.model.vectorstore == "redis": vector_store = RedisVectorStore( redis_url="redis://" + os.environ["REDIS_HOST"] + ":" + os.environ["REDIS_PORT"], index_name=project.model.name, metadata_fields=["source", "keywords"], index_prefix="llama_" + project.model.name, overwrite=False) return VectorStoreIndex.from_vector_store(embed_model=brain.getEmbedding( project.model.embeddings), vector_store=vector_store) def vector_list(project): output = [] if project.model.vectorstore == "chroma": path = FindEmbeddingsPath(project.model.name) db = chromadb.PersistentClient(path=path) collection = db.get_or_create_collection(project.model.name) docs = collection.get( include=["metadatas"] ) index = 0 for metadata in docs["metadatas"]: if metadata["source"] not in output: output.append(metadata["source"]) index = index + 1 elif project.model.vectorstore == "redis": lredis = redis.Redis( host=os.environ["REDIS_HOST"], port=os.environ["REDIS_PORT"], decode_responses=True) keys = lredis.keys("llama_" + project.model.name + "/*") for key in keys: source = lredis.hget(key, "source") if source not in output: output.append(source) return {"embeddings": output} def vector_list_source(project, source): output = [] if project.model.vectorstore == "chroma": path = FindEmbeddingsPath(project.model.name) db = chromadb.PersistentClient(path=path) collection = db.get_or_create_collection(project.model.name) docs = collection.get( include=["metadatas"] ) index = 0 for metadata in docs["metadatas"]: if metadata["source"] == source: output.append(metadata["source"]) index = index + 1 elif project.model.vectorstore == "redis": lredis = redis.Redis( host=os.environ["REDIS_HOST"], port=os.environ["REDIS_PORT"], decode_responses=True) 
keys = lredis.keys("llama_" + project.model.name + "/*") for key in keys: sourcer = lredis.hget(key, "source").strip() id = lredis.hget(key, "id").strip() if source == sourcer: output.append({"source": source, "id": id, "score": 1}) return output def vector_info(project): if project.model.vectorstore == "chroma": path = FindEmbeddingsPath(project.model.name) db = chromadb.PersistentClient(path=path) collection = db.get_or_create_collection(project.model.name) docs = collection.get( include=["metadatas"] ) return len(docs["ids"]) elif project.model.vectorstore == "redis": lredis = redis.Redis( host=os.environ["REDIS_HOST"], port=os.environ["REDIS_PORT"], decode_responses=True) keys = lredis.keys("llama_" + project.model.name + "/*") return len(keys) def vector_find_source(project, source): docs = [] if project.model.vectorstore == "chroma": path = FindEmbeddingsPath(project.model.name) db = chromadb.PersistentClient(path=path) collection = db.get_or_create_collection(project.model.name) docs = collection.get(where={'source': source}) elif project.model.vectorstore == "redis": lredis = redis.Redis( host=os.environ["REDIS_HOST"], port=os.environ["REDIS_PORT"], decode_responses=True) keys = lredis.keys("llama_" + project.model.name + "/*") ids = [] metadatas = [] documents = [] for key in keys: lsource = lredis.hget(key, "source") if lsource == source: ids.append(key) metadatas.append( {"source": lsource, "keywords": lredis.hget(key, "keywords")}) documents.append(lredis.hget(key, "text")) docs = {"ids": ids, "metadatas": metadatas, "documents": documents} return docs def vector_find_id(project, id): output = {"id": id} if project.model.vectorstore == "chroma": path = FindEmbeddingsPath(project.model.name) db = chromadb.PersistentClient(path=path) collection = db.get_or_create_collection(project.model.name) docs = collection.get(ids=[id]) output["metadata"] = { k: v for k, v in docs["metadatas"][0].items() if not k.startswith('_')} output["document"] = docs["documents"][0] elif project.model.vectorstore == "redis": lredis = redis.Redis( host=os.environ["REDIS_HOST"], port=os.environ["REDIS_PORT"], decode_responses=True) ids = "llama_" + project.model.name + "/vector_" + id keys = lredis.hkeys(ids) keys = [k for k in keys if not k.startswith( '_') and k != "vector" and k != "text" and k != "doc_id" and k != "id"] data = lredis.hmget(ids, keys) text = lredis.hget(ids, "text") output["metadata"] = dict(zip(keys, data)) output["document"] = text return output def vector_delete(project): if project.model.vectorstore == "chroma": try: embeddingsPath = FindEmbeddingsPath(project.model.name) shutil.rmtree(embeddingsPath, ignore_errors=True) except BaseException: pass elif project.model.vectorstore == "redis": lredis = redis.Redis( host=os.environ["REDIS_HOST"], port=os.environ["REDIS_PORT"], decode_responses=True) try: lredis.ft(project.model.name).dropindex(True) embeddingsPath = FindEmbeddingsPath(project.model.name) shutil.rmtree(embeddingsPath, ignore_errors=True) except BaseException: pass def vector_delete_source(project, source): ids = [] if project.model.vectorstore == "chroma": path = FindEmbeddingsPath(project.model.name) db = chromadb.PersistentClient(path=path) collection = db.get_or_create_collection(project.model.name) ids = collection.get(where={'source': source})['ids'] if len(ids): collection.delete(ids) elif project.model.vectorstore == "redis": lredis = redis.Redis( host=os.environ["REDIS_HOST"], port=os.environ["REDIS_PORT"], decode_responses=True) keys = lredis.keys("llama_" + 
project.model.name + "/*") for key in keys: lsource = lredis.hget(key, "source") if lsource == source: ids.append(key) lredis.delete(key) return ids def vector_delete_id(project, id): if project.model.vectorstore == "chroma": path = FindEmbeddingsPath(project.model.name) db = chromadb.PersistentClient(path=path) collection = db.get_or_create_collection(project.model.name) ids = collection.get(ids=[id])['ids'] if len(ids): collection.delete(ids) elif project.model.vectorstore == "redis": lredis = redis.Redis( host=os.environ["REDIS_HOST"], port=os.environ["REDIS_PORT"], decode_responses=True) lredis.delete(id) return id def vector_reset(brain, project): if project.model.vectorstore == "chroma": path = FindEmbeddingsPath(project.model.name) db = chromadb.PersistentClient(path=path) db.reset() elif project.model.vectorstore == "redis": lredis = redis.Redis( host=os.environ["REDIS_HOST"], port=os.environ["REDIS_PORT"], decode_responses=True) lredis.ft(project.model.name).dropindex(True) project.db = vector_init(brain, project)
[ "llama_index.core.storage.StorageContext.from_defaults", "llama_index.vector_stores.chroma.ChromaVectorStore", "llama_index.vector_stores.redis.RedisVectorStore" ]
[((370, 408), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (388, 408), False, 'from app.tools import FindEmbeddingsPath\n'), ((469, 505), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'path'}), '(path=path)\n', (494, 505), False, 'import chromadb\n'), ((605, 659), 'llama_index.vector_stores.chroma.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (622, 659), False, 'from llama_index.vector_stores.chroma import ChromaVectorStore\n'), ((687, 742), 'llama_index.core.storage.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (715, 742), False, 'from llama_index.core.storage import StorageContext\n'), ((2278, 2522), 'llama_index.vector_stores.redis.RedisVectorStore', 'RedisVectorStore', ([], {'redis_url': "('redis://' + os.environ['REDIS_HOST'] + ':' + os.environ['REDIS_PORT'])", 'index_name': 'project.model.name', 'metadata_fields': "['source', 'keywords']", 'index_prefix': "('llama_' + project.model.name)", 'overwrite': '(False)'}), "(redis_url='redis://' + os.environ['REDIS_HOST'] + ':' + os\n .environ['REDIS_PORT'], index_name=project.model.name, metadata_fields=\n ['source', 'keywords'], index_prefix='llama_' + project.model.name,\n overwrite=False)\n", (2294, 2522), False, 'from llama_index.vector_stores.redis import RedisVectorStore\n'), ((2859, 2897), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (2877, 2897), False, 'from app.tools import FindEmbeddingsPath\n'), ((2911, 2947), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'path'}), '(path=path)\n', (2936, 2947), False, 'import chromadb\n'), ((3851, 3889), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (3869, 3889), False, 'from app.tools import FindEmbeddingsPath\n'), ((3903, 3939), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'path'}), '(path=path)\n', (3928, 3939), False, 'import chromadb\n'), ((4880, 4918), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (4898, 4918), False, 'from app.tools import FindEmbeddingsPath\n'), ((4932, 4968), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'path'}), '(path=path)\n', (4957, 4968), False, 'import chromadb\n'), ((5552, 5590), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (5570, 5590), False, 'from app.tools import FindEmbeddingsPath\n'), ((5604, 5640), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'path'}), '(path=path)\n', (5629, 5640), False, 'import chromadb\n'), ((6622, 6660), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (6640, 6660), False, 'from app.tools import FindEmbeddingsPath\n'), ((6674, 6710), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'path'}), '(path=path)\n', (6699, 6710), False, 'import chromadb\n'), ((8429, 8467), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (8447, 8467), False, 'from app.tools import FindEmbeddingsPath\n'), ((8481, 8517), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 
'path'}), '(path=path)\n', (8506, 8517), False, 'import chromadb\n'), ((9256, 9294), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (9274, 9294), False, 'from app.tools import FindEmbeddingsPath\n'), ((9308, 9344), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'path'}), '(path=path)\n', (9333, 9344), False, 'import chromadb\n'), ((9851, 9889), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (9869, 9889), False, 'from app.tools import FindEmbeddingsPath\n'), ((9903, 9939), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': 'path'}), '(path=path)\n', (9928, 9939), False, 'import chromadb\n'), ((3349, 3449), 'redis.Redis', 'redis.Redis', ([], {'host': "os.environ['REDIS_HOST']", 'port': "os.environ['REDIS_PORT']", 'decode_responses': '(True)'}), "(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'],\n decode_responses=True)\n", (3360, 3449), False, 'import redis\n'), ((4337, 4437), 'redis.Redis', 'redis.Redis', ([], {'host': "os.environ['REDIS_HOST']", 'port': "os.environ['REDIS_PORT']", 'decode_responses': '(True)'}), "(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'],\n decode_responses=True)\n", (4348, 4437), False, 'import redis\n'), ((5210, 5310), 'redis.Redis', 'redis.Redis', ([], {'host': "os.environ['REDIS_HOST']", 'port': "os.environ['REDIS_PORT']", 'decode_responses': '(True)'}), "(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'],\n decode_responses=True)\n", (5221, 5310), False, 'import redis\n'), ((5830, 5930), 'redis.Redis', 'redis.Redis', ([], {'host': "os.environ['REDIS_HOST']", 'port': "os.environ['REDIS_PORT']", 'decode_responses': '(True)'}), "(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'],\n decode_responses=True)\n", (5841, 5930), False, 'import redis\n'), ((7049, 7149), 'redis.Redis', 'redis.Redis', ([], {'host': "os.environ['REDIS_HOST']", 'port': "os.environ['REDIS_PORT']", 'decode_responses': '(True)'}), "(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'],\n decode_responses=True)\n", (7060, 7149), False, 'import redis\n'), ((7716, 7754), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (7734, 7754), False, 'from app.tools import FindEmbeddingsPath\n'), ((7767, 7816), 'shutil.rmtree', 'shutil.rmtree', (['embeddingsPath'], {'ignore_errors': '(True)'}), '(embeddingsPath, ignore_errors=True)\n', (7780, 7816), False, 'import shutil\n'), ((7928, 8028), 'redis.Redis', 'redis.Redis', ([], {'host': "os.environ['REDIS_HOST']", 'port': "os.environ['REDIS_PORT']", 'decode_responses': '(True)'}), "(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'],\n decode_responses=True)\n", (7939, 8028), False, 'import redis\n'), ((8769, 8869), 'redis.Redis', 'redis.Redis', ([], {'host': "os.environ['REDIS_HOST']", 'port': "os.environ['REDIS_PORT']", 'decode_responses': '(True)'}), "(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'],\n decode_responses=True)\n", (8780, 8869), False, 'import redis\n'), ((9580, 9680), 'redis.Redis', 'redis.Redis', ([], {'host': "os.environ['REDIS_HOST']", 'port': "os.environ['REDIS_PORT']", 'decode_responses': '(True)'}), "(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'],\n decode_responses=True)\n", (9591, 9680), False, 'import redis\n'), ((10023, 10123), 'redis.Redis', 'redis.Redis', ([], {'host': "os.environ['REDIS_HOST']", 'port': 
"os.environ['REDIS_PORT']", 'decode_responses': '(True)'}), "(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'],\n decode_responses=True)\n", (10034, 10123), False, 'import redis\n'), ((1088, 1332), 'llama_index.vector_stores.redis.RedisVectorStore', 'RedisVectorStore', ([], {'redis_url': "('redis://' + os.environ['REDIS_HOST'] + ':' + os.environ['REDIS_PORT'])", 'index_name': 'project.model.name', 'metadata_fields': "['source', 'keywords']", 'index_prefix': "('llama_' + project.model.name)", 'overwrite': '(False)'}), "(redis_url='redis://' + os.environ['REDIS_HOST'] + ':' + os\n .environ['REDIS_PORT'], index_name=project.model.name, metadata_fields=\n ['source', 'keywords'], index_prefix='llama_' + project.model.name,\n overwrite=False)\n", (1104, 1332), False, 'from llama_index.vector_stores.redis import RedisVectorStore\n'), ((1479, 1534), 'llama_index.core.storage.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1507, 1534), False, 'from llama_index.core.storage import StorageContext\n'), ((8162, 8200), 'app.tools.FindEmbeddingsPath', 'FindEmbeddingsPath', (['project.model.name'], {}), '(project.model.name)\n', (8180, 8200), False, 'from app.tools import FindEmbeddingsPath\n'), ((8213, 8262), 'shutil.rmtree', 'shutil.rmtree', (['embeddingsPath'], {'ignore_errors': '(True)'}), '(embeddingsPath, ignore_errors=True)\n', (8226, 8262), False, 'import shutil\n'), ((1037, 1053), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1047, 1053), False, 'import os\n')]
#!/usr/bin/env python3

import json
import logging
import re
import requests

import altair as alt
import matplotlib.pyplot as plt
import pandas as pd
import streamlit as st

from datetime import datetime, timedelta
from langchain.llms import OpenAI
from llama_index import GPTVectorStoreIndex, Document, LLMPredictor, ServiceContext
from requests.exceptions import HTTPError
from wordcloud import WordCloud


logger = logging.getLogger("llama_index")
logger.setLevel(logging.WARNING)

TITLE = "Daily News Summary"
ICON = "https://archive.org/favicon.ico"
VISEXP = "https://storage.googleapis.com/data.gdeltproject.org/gdeltv3/iatv/visualexplorer"

BGNDT = pd.to_datetime("2022-03-25").date()
ENDDT = (datetime.now() - timedelta(hours=30)).date()

CHANNELS = {
  "": "-- Select --",
  "ESPRESO": "Espreso TV",
  "RUSSIA1": "Russia-1",
  "RUSSIA24": "Russia-24",
  "1TV": "Channel One Russia",
  "NTV": "NTV",
  "BELARUSTV": "Belarus TV",
  "IRINN": "Islamic Republic of Iran News Network"
}

st.set_page_config(page_title=TITLE, page_icon=ICON, layout="centered", initial_sidebar_state="collapsed")
st.title(TITLE)

llm_predictor = LLMPredictor(llm=OpenAI(max_tokens=1024, model_name="text-davinci-003"))


@st.cache_resource(show_spinner=False)
def load_transcript(id, lg):
  lang = "" if lg == "Original" else ".en"
  r = requests.get(f"{VISEXP}/{id}.transcript{lang}.txt")
  r.raise_for_status()
  return r.content


@st.cache_resource(show_spinner=False)
def load_index(ch, dt, lg):
  r = requests.get(f"{VISEXP}/{ch}.{dt}.inventory.json")
  r.raise_for_status()
  shows = r.json()["shows"]
  idx = GPTVectorStoreIndex.from_documents([], service_context=ServiceContext.from_defaults(llm_predictor=llm_predictor))
  msg = f"Loading `{dt[:4]}-{dt[4:6]}-{dt[6:8]}` {lg} transcripts for `{CHANNELS.get(ch, 'selected')}` channel..."
  prog = st.progress(0.0, text=msg)
  for i, tr in enumerate(shows, start=1):
    try:
      idx.insert(Document(load_transcript(tr["id"], lg).decode("utf-8")), llm_predictor=llm_predictor)
    except HTTPError as e:
      pass
    prog.progress(i/len(shows), text=msg)
  prog.empty()
  return idx.as_query_engine()


@st.cache_resource(show_spinner="Extracting top entities...")
def get_top_entities(_idx, ch, dt, lg):
  res = _idx.query("20 most frequent entities with their frequency in these articles as key-value in JSON format")
  kw = json.loads(res.response.strip())
  wc = WordCloud(background_color="white")
  wc.generate_from_frequencies(kw)
  fig, ax = plt.subplots()
  ax.imshow(wc)
  ax.axis("off")
  return fig, pd.DataFrame(kw.items()).rename(columns={0: "Entity", 1: "Frequency"}).sort_values("Frequency", ascending=False)


@st.cache_resource(show_spinner="Constructing news headlines...")
def get_headlines(_idx, ch, dt, lg):
  return _idx.query("Top 10 news headlines with summary in these articles in Markdown format")


qp = st.experimental_get_query_params()
if "date" not in st.session_state and qp.get("date"):
  st.session_state["date"] = datetime.strptime(qp.get("date")[0], "%Y-%m-%d").date()
if "chan" not in st.session_state and qp.get("chan"):
  st.session_state["chan"] = qp.get("chan")[0]
if "lang" not in st.session_state and qp.get("lang"):
  st.session_state["lang"] = qp.get("lang")[0]

cols = st.columns(3)
dt = cols[0].date_input("Date", value=ENDDT, min_value=BGNDT, max_value=ENDDT, key="date").strftime("%Y%m%d")
ch = cols[1].selectbox("Channel", CHANNELS, format_func=lambda x: CHANNELS.get(x, ""), key="chan")
lg = cols[2].selectbox("Language", ["English", "Original"], format_func=lambda x: "English (Translation)" if x == "English" else x, key="lang", disabled=True) # Disabled due to a bug https://github.com/jerryjliu/gpt_index/issues/294

if not ch:
  st.info(f"Select a channel to summarize for the selected day.")
  st.stop()

st.experimental_set_query_params(**st.session_state)

try:
  idx = load_index(ch, dt, lg)
except HTTPError as e:
  st.warning(f"Transcripts for `{CHANNELS.get(ch, 'selected')}` channel are not available for `{dt[:4]}-{dt[4:6]}-{dt[6:8]}` yet, try selecting another date!", icon="⚠️")
  st.stop()

tbs = st.tabs(["Top Entities", "Frequencies"])
try:
  fig, d = get_top_entities(idx, ch, dt, lg)
  tbs[0].pyplot(fig)
  tbs[1].dataframe(d, use_container_width=True)
except:
  msg = "Entity frequency data is not in the expected JSON shape!"
  tbs[0].warning(msg)
  tbs[1].warning(msg)

"### Top Headlines"

res = get_headlines(idx, ch, dt, lg)
st.markdown(res.response)
[ "llama_index.ServiceContext.from_defaults" ]
[((419, 451), 'logging.getLogger', 'logging.getLogger', (['"""llama_index"""'], {}), "('llama_index')\n", (436, 451), False, 'import logging\n'), ((992, 1102), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': 'TITLE', 'page_icon': 'ICON', 'layout': '"""centered"""', 'initial_sidebar_state': '"""collapsed"""'}), "(page_title=TITLE, page_icon=ICON, layout='centered',\n initial_sidebar_state='collapsed')\n", (1010, 1102), True, 'import streamlit as st\n'), ((1099, 1114), 'streamlit.title', 'st.title', (['TITLE'], {}), '(TITLE)\n', (1107, 1114), True, 'import streamlit as st\n'), ((1208, 1245), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (1225, 1245), True, 'import streamlit as st\n'), ((1421, 1458), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (1438, 1458), True, 'import streamlit as st\n'), ((2151, 2211), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '"""Extracting top entities..."""'}), "(show_spinner='Extracting top entities...')\n", (2168, 2211), True, 'import streamlit as st\n'), ((2675, 2739), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '"""Constructing news headlines..."""'}), "(show_spinner='Constructing news headlines...')\n", (2692, 2739), True, 'import streamlit as st\n'), ((2879, 2913), 'streamlit.experimental_get_query_params', 'st.experimental_get_query_params', ([], {}), '()\n', (2911, 2913), True, 'import streamlit as st\n'), ((3269, 3282), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (3279, 3282), True, 'import streamlit as st\n'), ((3816, 3868), 'streamlit.experimental_set_query_params', 'st.experimental_set_query_params', ([], {}), '(**st.session_state)\n', (3848, 3868), True, 'import streamlit as st\n'), ((4119, 4159), 'streamlit.tabs', 'st.tabs', (["['Top Entities', 'Frequencies']"], {}), "(['Top Entities', 'Frequencies'])\n", (4126, 4159), True, 'import streamlit as st\n'), ((4458, 4483), 'streamlit.markdown', 'st.markdown', (['res.response'], {}), '(res.response)\n', (4469, 4483), True, 'import streamlit as st\n'), ((1324, 1375), 'requests.get', 'requests.get', (['f"""{VISEXP}/{id}.transcript{lang}.txt"""'], {}), "(f'{VISEXP}/{id}.transcript{lang}.txt')\n", (1336, 1375), False, 'import requests\n'), ((1493, 1543), 'requests.get', 'requests.get', (['f"""{VISEXP}/{ch}.{dt}.inventory.json"""'], {}), "(f'{VISEXP}/{ch}.{dt}.inventory.json')\n", (1505, 1543), False, 'import requests\n'), ((1841, 1867), 'streamlit.progress', 'st.progress', (['(0.0)'], {'text': 'msg'}), '(0.0, text=msg)\n', (1852, 1867), True, 'import streamlit as st\n'), ((2414, 2449), 'wordcloud.WordCloud', 'WordCloud', ([], {'background_color': '"""white"""'}), "(background_color='white')\n", (2423, 2449), False, 'from wordcloud import WordCloud\n'), ((2497, 2511), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2509, 2511), True, 'import matplotlib.pyplot as plt\n'), ((3739, 3802), 'streamlit.info', 'st.info', (['f"""Select a channel to summarize for the selected day."""'], {}), "(f'Select a channel to summarize for the selected day.')\n", (3746, 3802), True, 'import streamlit as st\n'), ((3805, 3814), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (3812, 3814), True, 'import streamlit as st\n'), ((657, 685), 'pandas.to_datetime', 'pd.to_datetime', (['"""2022-03-25"""'], {}), "('2022-03-25')\n", (671, 685), True, 'import pandas as pd\n'), ((1149, 1203), 
'langchain.llms.OpenAI', 'OpenAI', ([], {'max_tokens': '(1024)', 'model_name': '"""text-davinci-003"""'}), "(max_tokens=1024, model_name='text-davinci-003')\n", (1155, 1203), False, 'from langchain.llms import OpenAI\n'), ((4102, 4111), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (4109, 4111), True, 'import streamlit as st\n'), ((702, 716), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (714, 716), False, 'from datetime import datetime, timedelta\n'), ((719, 738), 'datetime.timedelta', 'timedelta', ([], {'hours': '(30)'}), '(hours=30)\n', (728, 738), False, 'from datetime import datetime, timedelta\n'), ((1658, 1715), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (1686, 1715), False, 'from llama_index import GPTVectorStoreIndex, Document, LLMPredictor, ServiceContext\n')]
# https://gpt-index.readthedocs.io/en/latest/examples/query_engine/sub_question_query_engine.html
# Using LlamaIndex as a Callable Tool
from langchain.agents import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent
from langchain import HuggingFaceHub

from llama_index import LangchainEmbedding, ServiceContext
from langchain.embeddings.huggingface import HuggingFaceEmbeddings

from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext
from llama_index.query_engine import SubQuestionQueryEngine

documents = SimpleDirectoryReader('data/experiment').load_data()

repo_id = "tiiuae/falcon-7b"
embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.1, 'truncation': 'only_first', "max_length": 1024})
llm_predictor = LLMPredictor(llm=llm)
service_context = ServiceContext.from_defaults(chunk_size=512, llm_predictor=llm_predictor, embed_model=embed_model)
index = VectorStoreIndex.from_documents(documents=documents, service_context=service_context)
query_engine = index.as_query_engine(similarity_top_k=3)

# setup base query engine as tool
query_engine_tools = [
    QueryEngineTool(
        query_engine=query_engine,
        metadata=ToolMetadata(name='pg_essay', description='Paul Graham essay on What I Worked On')
    )
]

query_engine = SubQuestionQueryEngine.from_defaults(query_engine_tools=query_engine_tools)

# response = s_engine.query('Explain childhood')
response = query_engine.query('How was Paul Grahams life different before and after YC?')
print(response)
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.tools.ToolMetadata", "llama_index.query_engine.SubQuestionQueryEngine.from_defaults", "llama_index.LLMPredictor", "llama_index.ServiceContext.from_defaults", "llama_index.SimpleDirectoryReader" ]
[((874, 992), 'langchain.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': 'repo_id', 'model_kwargs': "{'temperature': 0.1, 'truncation': 'only_first', 'max_length': 1024}"}), "(repo_id=repo_id, model_kwargs={'temperature': 0.1,\n 'truncation': 'only_first', 'max_length': 1024})\n", (888, 992), False, 'from langchain import HuggingFaceHub\n'), ((1057, 1078), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (1069, 1078), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((1097, 1199), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size': '(512)', 'llm_predictor': 'llm_predictor', 'embed_model': 'embed_model'}), '(chunk_size=512, llm_predictor=llm_predictor,\n embed_model=embed_model)\n', (1125, 1199), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((1205, 1295), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(documents=documents, service_context=\n service_context)\n', (1236, 1295), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((1586, 1661), 'llama_index.query_engine.SubQuestionQueryEngine.from_defaults', 'SubQuestionQueryEngine.from_defaults', ([], {'query_engine_tools': 'query_engine_tools'}), '(query_engine_tools=query_engine_tools)\n', (1622, 1661), False, 'from llama_index.query_engine import SubQuestionQueryEngine\n'), ((842, 865), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {}), '()\n', (863, 865), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((727, 767), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data/experiment"""'], {}), "('data/experiment')\n", (748, 767), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext\n'), ((1479, 1566), 'llama_index.tools.ToolMetadata', 'ToolMetadata', ([], {'name': '"""pg_essay"""', 'description': '"""Paul Graham essay on What I Worked On"""'}), "(name='pg_essay', description=\n 'Paul Graham essay on What I Worked On')\n", (1491, 1566), False, 'from llama_index.tools import QueryEngineTool, ToolMetadata\n')]
from llama_index.core.node_parser import HTMLNodeParser from llama_index.readers.file import FlatReader from pathlib import Path reader = FlatReader() document = reader.load_data(Path("files/others/sample.html")) my_tags = ["p", "span"] html_parser = HTMLNodeParser(tags=my_tags) nodes = html_parser.get_nodes_from_documents(document) print('<span> elements:') for node in nodes: if node.metadata['tag']=='span': print(node.text) print('<p> elements:') for node in nodes: if node.metadata['tag']=='p': print(node.text)
[ "llama_index.readers.file.FlatReader", "llama_index.core.node_parser.HTMLNodeParser" ]
[((139, 151), 'llama_index.readers.file.FlatReader', 'FlatReader', ([], {}), '()\n', (149, 151), False, 'from llama_index.readers.file import FlatReader\n'), ((255, 283), 'llama_index.core.node_parser.HTMLNodeParser', 'HTMLNodeParser', ([], {'tags': 'my_tags'}), '(tags=my_tags)\n', (269, 283), False, 'from llama_index.core.node_parser import HTMLNodeParser\n'), ((180, 212), 'pathlib.Path', 'Path', (['"""files/others/sample.html"""'], {}), "('files/others/sample.html')\n", (184, 212), False, 'from pathlib import Path\n')]
from django.shortcuts import render from django.views import generic from rest_framework.decorators import api_view from rest_framework.response import Response from django.views.decorators.csrf import csrf_exempt from django.conf import settings from django.contrib.auth.mixins import LoginRequiredMixin import os from llama_index import ( StorageContext, load_index_from_storage ) def Bot(q): storage_context = StorageContext.from_defaults(persist_dir=os.path.join(settings.MEDIA_ROOT,"blog_store")) # load index index = load_index_from_storage(storage_context) query_engine = index.as_query_engine() response = query_engine.query(q) return response # Create your views here. class PageView(LoginRequiredMixin,generic.TemplateView): template_name = 'chatbot/chatbot.html' @api_view(['POST']) def chatbot(request): question = request.data['question'] print (question) answer = Bot(question) #print(answer,answer.source_nodes) #answer = f"Answer to {question}" return Response({'answer': str(answer)})
[ "llama_index.load_index_from_storage" ]
[((817, 835), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (825, 835), False, 'from rest_framework.decorators import api_view\n'), ((545, 585), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (568, 585), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((468, 515), 'os.path.join', 'os.path.join', (['settings.MEDIA_ROOT', '"""blog_store"""'], {}), "(settings.MEDIA_ROOT, 'blog_store')\n", (480, 515), False, 'import os\n')]
from pathlib import Path from llama_index import download_loader, LLMPredictor, ServiceContext, VectorStoreIndex from llama_index.vector_stores import MilvusVectorStore from llama_index.readers import PDFReader from llama_index import StorageContext from pymilvus import MilvusClient import os # Define constants for Milvus configuration MILVUS_HOST = os.environ.get("MILVUS_HOST", "10.97.151.193") MILVUS_PORT = os.environ.get("MILVUS_PORT", "19530") MILVUS_URI = f"http://{MILVUS_HOST}:{MILVUS_PORT}" # Initialize PDFReader pdf_reader = PDFReader() # Load documents from a PDF file document_path = Path('ingestion/keiichi_tsuchiya.pdf') #ToDo: load from S3 instead of local documents = pdf_reader.load_data(file=document_path) # Create an LLMPredictor with default parameters llm_predictor = LLMPredictor(llm=None) # Create a ServiceContext with LLMPredictor service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor) # Initialize a MilvusVectorStore with Milvus server configuration vector_store = MilvusVectorStore( uri=MILVUS_URI, dim=384, use_secure=False ) # Create a StorageContext with the MilvusVectorStore storage_context = StorageContext.from_defaults(vector_store=vector_store) # Create a VectorStoreIndex from the loaded documents index = VectorStoreIndex.from_documents( documents=documents, overwrite=True, # Set to False if you don't want to overwrite the index service_context=service_context, storage_context=storage_context ) # You can now perform queries with the index # For example: # result = index.query("What communication protocol is used in Pymilvus for communicating with Milvus?") # print(result)
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.LLMPredictor", "llama_index.ServiceContext.from_defaults", "llama_index.StorageContext.from_defaults", "llama_index.readers.PDFReader", "llama_index.vector_stores.MilvusVectorStore" ]
[((353, 399), 'os.environ.get', 'os.environ.get', (['"""MILVUS_HOST"""', '"""10.97.151.193"""'], {}), "('MILVUS_HOST', '10.97.151.193')\n", (367, 399), False, 'import os\n'), ((414, 452), 'os.environ.get', 'os.environ.get', (['"""MILVUS_PORT"""', '"""19530"""'], {}), "('MILVUS_PORT', '19530')\n", (428, 452), False, 'import os\n'), ((541, 552), 'llama_index.readers.PDFReader', 'PDFReader', ([], {}), '()\n', (550, 552), False, 'from llama_index.readers import PDFReader\n'), ((603, 641), 'pathlib.Path', 'Path', (['"""ingestion/keiichi_tsuchiya.pdf"""'], {}), "('ingestion/keiichi_tsuchiya.pdf')\n", (607, 641), False, 'from pathlib import Path\n'), ((799, 821), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'None'}), '(llm=None)\n', (811, 821), False, 'from llama_index import download_loader, LLMPredictor, ServiceContext, VectorStoreIndex\n'), ((885, 942), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (913, 942), False, 'from llama_index import download_loader, LLMPredictor, ServiceContext, VectorStoreIndex\n'), ((1025, 1085), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'uri': 'MILVUS_URI', 'dim': '(384)', 'use_secure': '(False)'}), '(uri=MILVUS_URI, dim=384, use_secure=False)\n', (1042, 1085), False, 'from llama_index.vector_stores import MilvusVectorStore\n'), ((1172, 1227), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1200, 1227), False, 'from llama_index import StorageContext\n'), ((1291, 1429), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents', 'overwrite': '(True)', 'service_context': 'service_context', 'storage_context': 'storage_context'}), '(documents=documents, overwrite=True,\n service_context=service_context, storage_context=storage_context)\n', (1322, 1429), False, 'from llama_index import download_loader, LLMPredictor, ServiceContext, VectorStoreIndex\n')]
from llama_index.llms.ollama import Ollama from typing import Any, Sequence from llama_index.core.bridge.pydantic import Field from llama_index.core.base.llms.types import ( ChatMessage, ChatResponseGen, CompletionResponse, CompletionResponseGen, ) from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback class Ollama(Ollama): system: str = Field( default="", description="Default system message to send to the model." ) keep_alive: int = Field( default=0, description="Time, in minutes, to wait before unloading model.", ) request_timeout = 120.0 @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any): if self.system and len(messages) > 0 and messages[0].role != "system": messages.insert( 0, ChatMessage(role="system", content=self.system) ) kwargs["keep_alive"] = self.keep_alive return super().chat(messages, **kwargs) @llm_chat_callback() def stream_chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponseGen: if self.system and len(messages) > 0 and messages[0].role != "system": messages.insert( 0, ChatMessage(role="system", content=self.system) ) kwargs["keep_alive"] = self.keep_alive yield from super().stream_chat(messages, **kwargs) @llm_completion_callback() def complete(self, prompt: str, formatted: bool = False, **kwargs: Any) -> CompletionResponse: if self.system: kwargs["system"] = self.system kwargs["keep_alive"] = self.keep_alive return super().complete(prompt, formatted, **kwargs) @llm_completion_callback() def stream_complete(self, prompt: str, formatted: bool = False, **kwargs: Any) -> CompletionResponseGen: if self.system: kwargs["system"] = self.system kwargs["keep_alive"] = self.keep_alive yield from super().stream_complete(prompt, formatted, **kwargs)
[ "llama_index.core.llms.callbacks.llm_completion_callback", "llama_index.core.llms.callbacks.llm_chat_callback", "llama_index.core.base.llms.types.ChatMessage", "llama_index.core.bridge.pydantic.Field" ]
[((396, 473), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '""""""', 'description': '"""Default system message to send to the model."""'}), "(default='', description='Default system message to send to the model.')\n", (401, 473), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((510, 596), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(0)', 'description': '"""Time, in minutes, to wait before unloading model."""'}), "(default=0, description=\n 'Time, in minutes, to wait before unloading model.')\n", (515, 596), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((651, 670), 'llama_index.core.llms.callbacks.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (668, 670), False, 'from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback\n'), ((1033, 1052), 'llama_index.core.llms.callbacks.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (1050, 1052), False, 'from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback\n'), ((1452, 1477), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1475, 1477), False, 'from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback\n'), ((1762, 1787), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1785, 1787), False, 'from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback\n'), ((866, 913), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': '"""system"""', 'content': 'self.system'}), "(role='system', content=self.system)\n", (877, 913), False, 'from llama_index.core.base.llms.types import ChatMessage, ChatResponseGen, CompletionResponse, CompletionResponseGen\n'), ((1274, 1321), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': '"""system"""', 'content': 'self.system'}), "(role='system', content=self.system)\n", (1285, 1321), False, 'from llama_index.core.base.llms.types import ChatMessage, ChatResponseGen, CompletionResponse, CompletionResponseGen\n')]
#!/usr/bin/env python3 from dataclasses import dataclass, field from typing import cast from loguru import logger from llama_index.core import Document, VectorStoreIndex, Settings from llama_index.core.query_engine import CitationQueryEngine import nest_asyncio from uglychain import Model, Retriever, StorageRetriever from uglychain.storage import Storage, SQLiteStorage from uglychain.llm.llama_index import LlamaIndexLLM import sys import logging logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) nest_asyncio.apply() Settings.llm = LlamaIndexLLM(model=Model.GPT3_TURBO) @dataclass class GithubIndex: filename: str = "data/github/github.db" model: Model = Model.DEFAULT summarizer_db: Storage = field(init=False) retriever: StorageRetriever = field(init=False) def __post_init__(self): self.summarizer_db = SQLiteStorage(self.filename, "ReadmeSummarizer", 30) self.retriever = Retriever.LlamaIndex.getStorage( persist_dir="./data/github/repos" ) if self._need_update: self._update() def search(self, query: str): index = cast(VectorStoreIndex, self.retriever.index) # type: ignore query_engine = CitationQueryEngine.from_args(index, similarity_top_k=5) # query_engine = index.as_query_engine(llm=LlamaIndexLLM(Model.GPT3_TURBO), similarity_top_k=8) # type: ignore return query_engine.query(query) # self.retriever.get(query, "refine") @property def _need_update(self): return False def _update(self): doc_chunks = [] data = self.summarizer_db.load(condition="timestamp = date('now','localtime')") for key, value in data.items(): doc = Document(text=value, doc_id=key) doc_chunks.append(doc) index = cast(VectorStoreIndex, self.retriever.index) # type: ignore logger.info("refresh_ref_docs") index.refresh_ref_docs(doc_chunks) self.retriever.storage.save(index) logger.info("refresh_ref_docs done") if __name__ == "__main__": index = GithubIndex() result = index.search("给我介绍几个关于使用大模型自动写代码的项目吧!") # logger.debug(result.source_nodes) logger.info(result)
[ "llama_index.core.query_engine.CitationQueryEngine.from_args", "llama_index.core.Document" ]
[((454, 512), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (473, 512), False, 'import logging\n'), ((587, 607), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (605, 607), False, 'import nest_asyncio\n'), ((623, 660), 'uglychain.llm.llama_index.LlamaIndexLLM', 'LlamaIndexLLM', ([], {'model': 'Model.GPT3_TURBO'}), '(model=Model.GPT3_TURBO)\n', (636, 660), False, 'from uglychain.llm.llama_index import LlamaIndexLLM\n'), ((544, 584), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (565, 584), False, 'import logging\n'), ((799, 816), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (804, 816), False, 'from dataclasses import dataclass, field\n'), ((851, 868), 'dataclasses.field', 'field', ([], {'init': '(False)'}), '(init=False)\n', (856, 868), False, 'from dataclasses import dataclass, field\n'), ((2276, 2295), 'loguru.logger.info', 'logger.info', (['result'], {}), '(result)\n', (2287, 2295), False, 'from loguru import logger\n'), ((513, 532), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (530, 532), False, 'import logging\n'), ((928, 980), 'uglychain.storage.SQLiteStorage', 'SQLiteStorage', (['self.filename', '"""ReadmeSummarizer"""', '(30)'], {}), "(self.filename, 'ReadmeSummarizer', 30)\n", (941, 980), False, 'from uglychain.storage import Storage, SQLiteStorage\n'), ((1006, 1072), 'uglychain.Retriever.LlamaIndex.getStorage', 'Retriever.LlamaIndex.getStorage', ([], {'persist_dir': '"""./data/github/repos"""'}), "(persist_dir='./data/github/repos')\n", (1037, 1072), False, 'from uglychain import Model, Retriever, StorageRetriever\n'), ((1203, 1247), 'typing.cast', 'cast', (['VectorStoreIndex', 'self.retriever.index'], {}), '(VectorStoreIndex, self.retriever.index)\n', (1207, 1247), False, 'from typing import cast\n'), ((1287, 1343), 'llama_index.core.query_engine.CitationQueryEngine.from_args', 'CitationQueryEngine.from_args', (['index'], {'similarity_top_k': '(5)'}), '(index, similarity_top_k=5)\n', (1316, 1343), False, 'from llama_index.core.query_engine import CitationQueryEngine\n'), ((1892, 1936), 'typing.cast', 'cast', (['VectorStoreIndex', 'self.retriever.index'], {}), '(VectorStoreIndex, self.retriever.index)\n', (1896, 1936), False, 'from typing import cast\n'), ((1961, 1992), 'loguru.logger.info', 'logger.info', (['"""refresh_ref_docs"""'], {}), "('refresh_ref_docs')\n", (1972, 1992), False, 'from loguru import logger\n'), ((2087, 2123), 'loguru.logger.info', 'logger.info', (['"""refresh_ref_docs done"""'], {}), "('refresh_ref_docs done')\n", (2098, 2123), False, 'from loguru import logger\n'), ((1808, 1840), 'llama_index.core.Document', 'Document', ([], {'text': 'value', 'doc_id': 'key'}), '(text=value, doc_id=key)\n', (1816, 1840), False, 'from llama_index.core import Document, VectorStoreIndex, Settings\n')]
from llama_index.core import SimpleDirectoryReader from llama_index.core.node_parser import SentenceSplitter from llama_index.extractors.entity import EntityExtractor reader = SimpleDirectoryReader('files') documents = reader.load_data() parser = SentenceSplitter(include_prev_next_rel=True) nodes = parser.get_nodes_from_documents(documents) entity_extractor = EntityExtractor( label_entities = True, device = "cpu" ) metadata_list = entity_extractor.extract(nodes) print(metadata_list)
[ "llama_index.core.node_parser.SentenceSplitter", "llama_index.core.SimpleDirectoryReader", "llama_index.extractors.entity.EntityExtractor" ]
[((177, 207), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""files"""'], {}), "('files')\n", (198, 207), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((248, 292), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'include_prev_next_rel': '(True)'}), '(include_prev_next_rel=True)\n', (264, 292), False, 'from llama_index.core.node_parser import SentenceSplitter\n'), ((364, 414), 'llama_index.extractors.entity.EntityExtractor', 'EntityExtractor', ([], {'label_entities': '(True)', 'device': '"""cpu"""'}), "(label_entities=True, device='cpu')\n", (379, 414), False, 'from llama_index.extractors.entity import EntityExtractor\n')]
from llama_index.llms.llama_cpp import LlamaCPP from llama_index.llms.llama_cpp.llama_utils import ( messages_to_prompt, completion_to_prompt, ) from llama_index.llms.openai import OpenAI from core.manager import settings MODEL = "openai" # LLM selection if MODEL == "openai": print("USE OPENAI") # Use OpenAI model system_prompt = """If the user is greeting then respond by saying, "Hello, how may we help you ?" """ llm = OpenAI(model="gpt-4-turbo-preview", api_key=settings.OPENAI_KEY, system_prompt=system_prompt) else: # Default to Llama print("USE LLAMA") # model_url: str = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF/resolve/main/llama-2-13b-chat.Q4_0.gguf" model_path: str = "core/models/llama-2-13b-chat.Q2_K.gguf" # TODO: Save the model automatically the first time # Check if model is already downloaded # if not os.path.exists(model_path): # print("Model not found. Downloading...") # response = requests.get(model_url) # with open(model_path, "wb") as f: # f.write(response.content) # print("Model downloaded and saved.") # else: # print("Model found.") llm = LlamaCPP( # model_url=model_url, model_path=model_path, temperature=0.1, max_new_tokens=256, context_window=3900, model_kwargs={"n_gpu_layers": 2}, # set GPU layers to 1 if you have one verbose=True, messages_to_prompt=messages_to_prompt, # providing additional parameters completion_to_prompt=completion_to_prompt, # providing additional parameters ) if __name__ == "__main__": print("LLM")
[ "llama_index.llms.llama_cpp.LlamaCPP", "llama_index.llms.openai.OpenAI" ]
[((470, 567), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-turbo-preview"""', 'api_key': 'settings.OPENAI_KEY', 'system_prompt': 'system_prompt'}), "(model='gpt-4-turbo-preview', api_key=settings.OPENAI_KEY,\n system_prompt=system_prompt)\n", (476, 567), False, 'from llama_index.llms.openai import OpenAI\n'), ((1219, 1451), 'llama_index.llms.llama_cpp.LlamaCPP', 'LlamaCPP', ([], {'model_path': 'model_path', 'temperature': '(0.1)', 'max_new_tokens': '(256)', 'context_window': '(3900)', 'model_kwargs': "{'n_gpu_layers': 2}", 'verbose': '(True)', 'messages_to_prompt': 'messages_to_prompt', 'completion_to_prompt': 'completion_to_prompt'}), "(model_path=model_path, temperature=0.1, max_new_tokens=256,\n context_window=3900, model_kwargs={'n_gpu_layers': 2}, verbose=True,\n messages_to_prompt=messages_to_prompt, completion_to_prompt=\n completion_to_prompt)\n", (1227, 1451), False, 'from llama_index.llms.llama_cpp import LlamaCPP\n')]
from llama_index import SimpleDirectoryReader,VectorStoreIndex , load_index_from_storage from llama_index.storage.storage_context import StorageContext from dotenv import load_dotenv import logging import sys load_dotenv() # enable INFO level logging logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) async def load_index(directory_path : str = r'data'): documents = SimpleDirectoryReader(directory_path, filename_as_id=True).load_data() print(f"loaded documents with {len(documents)} pages") try: # Rebuild storage context storage_context = StorageContext.from_defaults(persist_dir="./storage") # Try to load the index from storage index = load_index_from_storage(storage_context) logging.info("Index loaded from storage.") except FileNotFoundError: logging.info("Index not found. Creating a new one...") index = VectorStoreIndex.from_documents(documents) # Persist index to disk index.storage_context.persist() logging.info("New index created and persisted to storage.") return index async def update_index(directory_path : str = r'data'): try: documents = SimpleDirectoryReader(directory_path, filename_as_id=True).load_data() except FileNotFoundError: logging.error("Invalid document directory path.") return None try: # Rebuild storage context storage_context = StorageContext.from_defaults(persist_dir="./storage") # Try to load the index from storage index = load_index_from_storage(storage_context) logging.info("Existing index loaded from storage.") refreshed_docs = index.refresh_ref_docs(documents, update_kwargs={"delete_kwargs": {"delete_from_docstore": True}}) # index.update_ref_doc() print(refreshed_docs) print('Number of newly inserted/refreshed docs: ', sum(refreshed_docs)) index.storage_context.persist() logging.info("Index refreshed and persisted to storage.") return refreshed_docs except FileNotFoundError: # Run refresh_ref_docs function to check for document updates logging.error("Index is not created yet.") return None
[ "llama_index.SimpleDirectoryReader", "llama_index.VectorStoreIndex.from_documents", "llama_index.load_index_from_storage", "llama_index.storage.storage_context.StorageContext.from_defaults" ]
[((212, 225), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (223, 225), False, 'from dotenv import load_dotenv\n'), ((255, 313), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (274, 313), False, 'import logging\n'), ((345, 385), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (366, 385), False, 'import logging\n'), ((314, 333), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (331, 333), False, 'import logging\n'), ((658, 711), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (686, 711), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((773, 813), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (796, 813), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage\n'), ((822, 864), 'logging.info', 'logging.info', (['"""Index loaded from storage."""'], {}), "('Index loaded from storage.')\n", (834, 864), False, 'import logging\n'), ((1510, 1563), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (1538, 1563), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((1625, 1665), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1648, 1665), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage\n'), ((1674, 1725), 'logging.info', 'logging.info', (['"""Existing index loaded from storage."""'], {}), "('Existing index loaded from storage.')\n", (1686, 1725), False, 'import logging\n'), ((2042, 2099), 'logging.info', 'logging.info', (['"""Index refreshed and persisted to storage."""'], {}), "('Index refreshed and persisted to storage.')\n", (2054, 2099), False, 'import logging\n'), ((459, 517), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['directory_path'], {'filename_as_id': '(True)'}), '(directory_path, filename_as_id=True)\n', (480, 517), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage\n'), ((904, 958), 'logging.info', 'logging.info', (['"""Index not found. Creating a new one..."""'], {}), "('Index not found. 
Creating a new one...')\n", (916, 958), False, 'import logging\n'), ((975, 1017), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1006, 1017), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage\n'), ((1098, 1157), 'logging.info', 'logging.info', (['"""New index created and persisted to storage."""'], {}), "('New index created and persisted to storage.')\n", (1110, 1157), False, 'import logging\n'), ((1371, 1420), 'logging.error', 'logging.error', (['"""Invalid document directory path."""'], {}), "('Invalid document directory path.')\n", (1384, 1420), False, 'import logging\n'), ((2244, 2286), 'logging.error', 'logging.error', (['"""Index is not created yet."""'], {}), "('Index is not created yet.')\n", (2257, 2286), False, 'import logging\n'), ((1262, 1320), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['directory_path'], {'filename_as_id': '(True)'}), '(directory_path, filename_as_id=True)\n', (1283, 1320), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, load_index_from_storage\n')]
from typing import List from fastapi import APIRouter, Depends, HTTPException, status from llama_index.chat_engine.types import BaseChatEngine from llama_index.llms.base import ChatMessage from llama_index.llms.types import MessageRole from pydantic import BaseModel from app.engine.index import get_chat_engine chat_router = r = APIRouter() class _Message(BaseModel): role: MessageRole content: str context: List[str] | None = None class _ChatData(BaseModel): messages: List[_Message] class _Result(BaseModel): result: _Message @r.post("") async def chat( data: _ChatData, chat_engine: BaseChatEngine = Depends(get_chat_engine), ) -> _Result: # check preconditions and get last message if len(data.messages) == 0: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="No messages provided", ) lastMessage = data.messages.pop() if lastMessage.role != MessageRole.USER: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="Last message must be from user", ) # convert messages coming from the request to type ChatMessage messages = [ ChatMessage( role=m.role, content=m.content, ) for m in data.messages ] # query chat engine response = await chat_engine.achat(lastMessage.content, messages) return _Result( result=_Message( role=MessageRole.ASSISTANT, content=response.response, context=[x.text for x in response.source_nodes] ) )
[ "llama_index.llms.base.ChatMessage" ]
[((332, 343), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (341, 343), False, 'from fastapi import APIRouter, Depends, HTTPException, status\n'), ((647, 671), 'fastapi.Depends', 'Depends', (['get_chat_engine'], {}), '(get_chat_engine)\n', (654, 671), False, 'from fastapi import APIRouter, Depends, HTTPException, status\n'), ((780, 870), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""No messages provided"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'No messages provided')\n", (793, 870), False, 'from fastapi import APIRouter, Depends, HTTPException, status\n'), ((998, 1098), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""Last message must be from user"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'Last message must be from user')\n", (1011, 1098), False, 'from fastapi import APIRouter, Depends, HTTPException, status\n'), ((1221, 1264), 'llama_index.llms.base.ChatMessage', 'ChatMessage', ([], {'role': 'm.role', 'content': 'm.content'}), '(role=m.role, content=m.content)\n', (1232, 1264), False, 'from llama_index.llms.base import ChatMessage\n')]
import os from llama_index import LLMPredictor, VectorStoreIndex, SimpleDirectoryReader, ServiceContext, LangchainEmbedding from langchain.embeddings import OpenAIEmbeddings from langchain.llms import AzureOpenAI import openai import logging import sys #llamaindex logs logging.basicConfig(stream=sys.stdout, level=logging.INFO) # logging.DEBUG for more verbose output logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) #Enable to show openai logs #openai.log='debug' #Based on your settings, see version, base, key in your Azure AI portal api_type = "azure" api_version = "2023-03-15-preview" api_base = os.getenv("AZURE_API_BASE") api_key = os.getenv("AZURE_API_KEY") chat_deployment = "gpt35" embedding_deployment= "text-embedding-ada-002" # Chat model llm = AzureOpenAI(deployment_name=chat_deployment, openai_api_base=api_base, openai_api_key=api_key, model_kwargs={ "api_type": api_type, "api_version": api_version, }) llm_predictor = LLMPredictor(llm=llm) # Embedding model embedding_llm = LangchainEmbedding( OpenAIEmbeddings( model=embedding_deployment, deployment=embedding_deployment, openai_api_key=api_key, openai_api_base=api_base, openai_api_type=api_type, openai_api_version=api_version, ), embed_batch_size=1 ) #load docs documents = SimpleDirectoryReader('local-data').load_data() service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, embed_model=embedding_llm) index = VectorStoreIndex.from_documents(documents, service_context=service_context) index.storage_context.persist(persist_dir="local-index-azure") print("Saved embeddings")
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.ServiceContext.from_defaults", "llama_index.LLMPredictor", "llama_index.SimpleDirectoryReader" ]
[((271, 329), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (290, 329), False, 'import logging\n'), ((630, 657), 'os.getenv', 'os.getenv', (['"""AZURE_API_BASE"""'], {}), "('AZURE_API_BASE')\n", (639, 657), False, 'import os\n'), ((668, 694), 'os.getenv', 'os.getenv', (['"""AZURE_API_KEY"""'], {}), "('AZURE_API_KEY')\n", (677, 694), False, 'import os\n'), ((788, 955), 'langchain.llms.AzureOpenAI', 'AzureOpenAI', ([], {'deployment_name': 'chat_deployment', 'openai_api_base': 'api_base', 'openai_api_key': 'api_key', 'model_kwargs': "{'api_type': api_type, 'api_version': api_version}"}), "(deployment_name=chat_deployment, openai_api_base=api_base,\n openai_api_key=api_key, model_kwargs={'api_type': api_type,\n 'api_version': api_version})\n", (799, 955), False, 'from langchain.llms import AzureOpenAI\n'), ((975, 996), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (987, 996), False, 'from llama_index import LLMPredictor, VectorStoreIndex, SimpleDirectoryReader, ServiceContext, LangchainEmbedding\n'), ((1414, 1503), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embedding_llm'}), '(llm_predictor=llm_predictor, embed_model=\n embedding_llm)\n', (1442, 1503), False, 'from llama_index import LLMPredictor, VectorStoreIndex, SimpleDirectoryReader, ServiceContext, LangchainEmbedding\n'), ((1508, 1583), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (1539, 1583), False, 'from llama_index import LLMPredictor, VectorStoreIndex, SimpleDirectoryReader, ServiceContext, LangchainEmbedding\n'), ((401, 441), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (422, 441), False, 'import logging\n'), ((1056, 1250), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': 'embedding_deployment', 'deployment': 'embedding_deployment', 'openai_api_key': 'api_key', 'openai_api_base': 'api_base', 'openai_api_type': 'api_type', 'openai_api_version': 'api_version'}), '(model=embedding_deployment, deployment=\n embedding_deployment, openai_api_key=api_key, openai_api_base=api_base,\n openai_api_type=api_type, openai_api_version=api_version)\n', (1072, 1250), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((370, 389), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (387, 389), False, 'import logging\n'), ((1347, 1382), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""local-data"""'], {}), "('local-data')\n", (1368, 1382), False, 'from llama_index import LLMPredictor, VectorStoreIndex, SimpleDirectoryReader, ServiceContext, LangchainEmbedding\n')]
import time import os import streamlit as st import openai import logging import sys import llama_index from qdrant_client import QdrantClient from llama_index import VectorStoreIndex, ServiceContext from llama_index.llms import OpenAI from llama_index import SimpleDirectoryReader from llama_index.storage.storage_context import StorageContext from llama_index.vector_stores.qdrant import QdrantVectorStore from llama_index import set_global_service_context from llama_index.embeddings import VoyageEmbedding from qdrant_client.models import Distance, VectorParams version = "1.0.2" st.set_page_config(page_title=f"Courier v{version}", page_icon="🌎", layout="centered", initial_sidebar_state="auto", menu_items=None) st.title(f"Courier v{version}") # Set up logging and tracing via Arize Phoenix logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) # Use Voyager Lite Embeddings model_name = "voyage-lite-01-instruct" voyage_api_key = os.environ.get("VOYAGE_API_KEY", "") embed_model = VoyageEmbedding( model_name=model_name, voyage_api_key=voyage_api_key ) # Update Custom QA Template with Gaia Information if "messages" not in st.session_state.keys(): # Initialize the chat messages history st.session_state.messages = [ {"role": "assistant", "content": "Hello, my name is Courier. I'm an Generative AI Assistant designed to help Proctor Academy students. Ask me anything about the Proctor Handbook or any current Proctor Academy staff."} ] openai.api_key = "" @st.cache_resource(show_spinner=False) def load_data(): with st.spinner(text=f"Loading Courier v{version} ..."): docs = SimpleDirectoryReader(input_dir="./data", recursive=True).load_data() qdrant_client = QdrantClient( url="https://02aec354-4932-4062-9e00-422eacb506fc.us-east4-0.gcp.cloud.qdrant.io", api_key="", ) qdrant_client.create_collection(collection_name="courierv52",vectors_config=VectorParams(size=1024, distance=Distance.EUCLID),) service_context = ServiceContext.from_defaults(embed_model=embed_model,llm=OpenAI(model="gpt-4", max_tokens=1500, temperature=0.5, system_prompt="Keep your answers technical and based on facts and do not hallucinate in responses. In addition, make sure all responses look natural, no Answer: or Query: in the response. 
Always attempt to query database.")) set_global_service_context(service_context) vector_store = QdrantVectorStore(client=qdrant_client, collection_name="courierv52") storage_context = StorageContext.from_defaults(vector_store=vector_store) index = VectorStoreIndex.from_documents( docs, storage_context=storage_context, service_context=service_context, ) return index index = load_data() if "chat_engine" not in st.session_state.keys(): # Initialize the chat engine st.session_state.chat_engine = index.as_chat_engine(streaming=True,chat_mode="condense_question",max_tokens=1500,verbose=True) if prompt := st.chat_input("Your question"): # Prompt for user input and save to chat history st.session_state.messages.append({"role": "user", "content": prompt}) for message in st.session_state.messages: # Display the prior chat messages with st.chat_message(message["role"]): st.write(message["content"]) # If last message is not from assistant, generate a new response if st.session_state.messages[-1]["role"] != "assistant": with st.chat_message("assistant"): res_box = st.empty() # Placeholder for the response text with st.spinner("Thinking..."): response = st.session_state.chat_engine.stream_chat(prompt) full_response = "" for token in response.response_gen: full_response += "".join(token) res_box.write(full_response) message = {"role": "assistant", "content": response.response} st.session_state.messages.append(message)
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.vector_stores.qdrant.QdrantVectorStore", "llama_index.storage.storage_context.StorageContext.from_defaults", "llama_index.SimpleDirectoryReader", "llama_index.embeddings.VoyageEmbedding", "llama_index.llms.OpenAI", "llama_index.set_global_service_context" ]
[((585, 723), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': 'f"""Courier v{version}"""', 'page_icon': '"""🌎"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title=f'Courier v{version}', page_icon='🌎', layout=\n 'centered', initial_sidebar_state='auto', menu_items=None)\n", (603, 723), True, 'import streamlit as st\n'), ((719, 750), 'streamlit.title', 'st.title', (['f"""Courier v{version}"""'], {}), "(f'Courier v{version}')\n", (727, 750), True, 'import streamlit as st\n'), ((799, 857), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (818, 857), False, 'import logging\n'), ((1019, 1055), 'os.environ.get', 'os.environ.get', (['"""VOYAGE_API_KEY"""', '""""""'], {}), "('VOYAGE_API_KEY', '')\n", (1033, 1055), False, 'import os\n'), ((1071, 1140), 'llama_index.embeddings.VoyageEmbedding', 'VoyageEmbedding', ([], {'model_name': 'model_name', 'voyage_api_key': 'voyage_api_key'}), '(model_name=model_name, voyage_api_key=voyage_api_key)\n', (1086, 1140), False, 'from llama_index.embeddings import VoyageEmbedding\n'), ((1573, 1610), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (1590, 1610), True, 'import streamlit as st\n'), ((889, 929), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (910, 929), False, 'import logging\n'), ((1220, 1243), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (1241, 1243), True, 'import streamlit as st\n'), ((2878, 2901), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (2899, 2901), True, 'import streamlit as st\n'), ((3077, 3107), 'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (3090, 3107), True, 'import streamlit as st\n'), ((3162, 3231), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (3194, 3231), True, 'import streamlit as st\n'), ((858, 877), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (875, 877), False, 'import logging\n'), ((1637, 1687), 'streamlit.spinner', 'st.spinner', ([], {'text': 'f"""Loading Courier v{version} ..."""'}), "(text=f'Loading Courier v{version} ...')\n", (1647, 1687), True, 'import streamlit as st\n'), ((1798, 1915), 'qdrant_client.QdrantClient', 'QdrantClient', ([], {'url': '"""https://02aec354-4932-4062-9e00-422eacb506fc.us-east4-0.gcp.cloud.qdrant.io"""', 'api_key': '""""""'}), "(url=\n 'https://02aec354-4932-4062-9e00-422eacb506fc.us-east4-0.gcp.cloud.qdrant.io'\n , api_key='')\n", (1810, 1915), False, 'from qdrant_client import QdrantClient\n'), ((2449, 2492), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (2475, 2492), False, 'from llama_index import set_global_service_context\n'), ((2516, 2585), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'qdrant_client', 'collection_name': '"""courierv52"""'}), "(client=qdrant_client, collection_name='courierv52')\n", (2533, 2585), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((2612, 2667), 'llama_index.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 
'vector_store'}), '(vector_store=vector_store)\n', (2640, 2667), False, 'from llama_index.storage.storage_context import StorageContext\n'), ((2684, 2791), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(docs, storage_context=storage_context,\n service_context=service_context)\n', (2715, 2791), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((3318, 3350), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (3333, 3350), True, 'import streamlit as st\n'), ((3360, 3388), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (3368, 3388), True, 'import streamlit as st\n'), ((3525, 3553), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (3540, 3553), True, 'import streamlit as st\n'), ((3577, 3587), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (3585, 3587), True, 'import streamlit as st\n'), ((3642, 3667), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (3652, 3667), True, 'import streamlit as st\n'), ((3696, 3744), 'streamlit.session_state.chat_engine.stream_chat', 'st.session_state.chat_engine.stream_chat', (['prompt'], {}), '(prompt)\n', (3736, 3744), True, 'import streamlit as st\n'), ((4027, 4068), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (4059, 4068), True, 'import streamlit as st\n'), ((1704, 1761), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': '"""./data"""', 'recursive': '(True)'}), "(input_dir='./data', recursive=True)\n", (1725, 1761), False, 'from llama_index import SimpleDirectoryReader\n'), ((2025, 2074), 'qdrant_client.models.VectorParams', 'VectorParams', ([], {'size': '(1024)', 'distance': 'Distance.EUCLID'}), '(size=1024, distance=Distance.EUCLID)\n', (2037, 2074), False, 'from qdrant_client.models import Distance, VectorParams\n'), ((2160, 2449), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4"""', 'max_tokens': '(1500)', 'temperature': '(0.5)', 'system_prompt': '"""Keep your answers technical and based on facts and do not hallucinate in responses. In addition, make sure all responses look natural, no Answer: or Query: in the response. Always attempt to query database."""'}), "(model='gpt-4', max_tokens=1500, temperature=0.5, system_prompt=\n 'Keep your answers technical and based on facts and do not hallucinate in responses. In addition, make sure all responses look natural, no Answer: or Query: in the response. Always attempt to query database.'\n )\n", (2166, 2449), False, 'from llama_index.llms import OpenAI\n')]
#! coding: utf-8 import os from dataclasses import dataclass from typing import List, Dict, Optional from llama_index import ServiceContext, get_response_synthesizer, VectorStoreIndex, StorageContext, \ load_indices_from_storage, TreeIndex from llama_index.indices.base import BaseIndex from llama_index.indices.postprocessor import LLMRerank from llama_index.indices.tree.base import TreeRetrieverMode from llama_index.query_engine import RetrieverQueryEngine from llama_index.response_synthesizers import ResponseMode, BaseSynthesizer from common.config import index_dir from common.prompt import CH_CHOICE_SELECT_PROMPT, CH_TREE_SUMMARIZE_PROMPT from query_todo.retrievers import MultiRetriever def load_index(title: str, service_context: ServiceContext = None) -> List[BaseIndex]: storage_context = StorageContext.from_defaults(persist_dir=os.path.join(index_dir, title)) return load_indices_from_storage( storage_context=storage_context, service_context=service_context, ) def load_indices(service_context: ServiceContext) -> Dict[str, List[BaseIndex]]: indices: Dict[str, List[BaseIndex]] = {} for title in os.listdir(index_dir): indices[title] = load_index(title, service_context) return indices def create_response_synthesizer(service_context: ServiceContext = None) -> BaseSynthesizer: # TODO # https://docs.llamaindex.ai/en/stable/module_guides/querying/response_synthesizers/root.html#get-started raise NotImplementedError @dataclass class DocumentQueryEngineFactory: indices: List[BaseIndex] summary: Optional[str] = "" def first_index(self): return self.indices[0] def create_retrievers(self): # TODO # Create multiple retrievers based on the indices # https://docs.llamaindex.ai/en/stable/understanding/querying/querying.html#customizing-the-stages-of-querying raise NotImplementedError def doc_store(self): return self.indices[0].docstore def create_query_engine(self, service_context: ServiceContext) -> RetrieverQueryEngine: # TODO # Combine the retriever, LLM rerank and response synthesizer into a complete query engine # https://docs.llamaindex.ai/en/stable/understanding/querying/querying.html raise NotImplementedError
[ "llama_index.load_indices_from_storage" ]
[((899, 995), 'llama_index.load_indices_from_storage', 'load_indices_from_storage', ([], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(storage_context=storage_context, service_context=\n service_context)\n', (924, 995), False, 'from llama_index import ServiceContext, get_response_synthesizer, VectorStoreIndex, StorageContext, load_indices_from_storage, TreeIndex\n'), ((1159, 1180), 'os.listdir', 'os.listdir', (['index_dir'], {}), '(index_dir)\n', (1169, 1180), False, 'import os\n'), ((856, 886), 'os.path.join', 'os.path.join', (['index_dir', 'title'], {}), '(index_dir, title)\n', (868, 886), False, 'import os\n')]
import logging import glob from pathlib import Path from llama_index import ( SimpleDirectoryReader, download_loader ) class DataReader: __LOGGER_NAME = "data_reader" __SIMPLE_SUPPORTED_EXTENSIONS = [".csv", ".docx", ".epub", ".hwp", ".ipynb", ".jpeg", ".mbox", ".md", ".mp3", ".pdf", ".png", ".pptm", ".pptx"] __JSON_READER_LOADER = "JSONReader" __WIKIPEDIA_READER_LOADER = "WikipediaReader" def __init__(self, data_dir): # Set logger self.logger = logging.getLogger(self.__LOGGER_NAME) # Data directory and files to load self.data_dir = data_dir def load(self): """ Loads the documents from all the given directories. :return: List of llama-index documents """ documents = [] if self.data_dir is not None: loaders = [ self.__load_simple, self.__load_json, self.__load_wiki ] self.logger.info(f"Loading documents from {self.data_dir} directory ...") for load in loaders: documents.extend(load()) self.logger.info(f"Loaded {len(documents)} documents") else: self.logger.info("No data directory specified, skipping loading documents") return documents def __load_simple(self): """ Loads the documents from the given data directory only for supported file types. The best file reader will be automatically selected from the given file extensions. Docs: https://docs.llamaindex.ai/en/stable/module_guides/loading/simpledirectoryreader.html#supported-file-types :return: List of llama-index documents """ self.logger.debug(f"Loading simple documents ...") documents = SimpleDirectoryReader( input_dir=self.data_dir, required_exts=self.__SIMPLE_SUPPORTED_EXTENSIONS ).load_data() self.logger.debug(f"Loaded {len(documents)} documents") return documents def __load_json(self): """ Loads the JSON documents from the given data directory. :return: List of llama-index documents """ json_files = self.__get_all_files_with_ext("json") JSONReader = download_loader(self.__JSON_READER_LOADER) loader = JSONReader() self.logger.debug(f"Loading JSON documents ...") documents = [] for json_file in json_files: documents.extend(loader.load_data(Path(json_file), is_jsonl=False)) self.logger.debug(f"Loaded {len(documents)} JSON documents") return documents def __load_wiki(self): """ Loads the wikipedia pages from the given data directory. :return: List of llama-index documents """ wiki_files = self.__get_all_files_with_ext("wikipedia") wiki_pages = [] for wiki_file in wiki_files: wiki_pages.extend(self.__get_pages(wiki_file)) WikipediaReader = download_loader(self.__WIKIPEDIA_READER_LOADER) loader = WikipediaReader() self.logger.debug(f"Loading Wikipedia pages ...") documents = loader.load_data(pages=wiki_pages) self.logger.debug(f"Loaded {len(documents)} Wikipedia documents") return documents def __get_all_files_with_ext(self, file_ext): """ Gets all the files with the given extension from the data directory. :param file_ext: The file extension to search for :return: List of file paths """ return glob.glob(f"{self.data_dir}/*.{file_ext}") @staticmethod def __get_pages(file_path): """ Reads the pages/links/documents from the given file path. :param file_path: The path to the file containing the pages :return: List of pages """ with open(file_path, "r") as f: links = f.readlines() return links
[ "llama_index.download_loader", "llama_index.SimpleDirectoryReader" ]
[((498, 535), 'logging.getLogger', 'logging.getLogger', (['self.__LOGGER_NAME'], {}), '(self.__LOGGER_NAME)\n', (515, 535), False, 'import logging\n'), ((2288, 2330), 'llama_index.download_loader', 'download_loader', (['self.__JSON_READER_LOADER'], {}), '(self.__JSON_READER_LOADER)\n', (2303, 2330), False, 'from llama_index import SimpleDirectoryReader, download_loader\n'), ((3031, 3078), 'llama_index.download_loader', 'download_loader', (['self.__WIKIPEDIA_READER_LOADER'], {}), '(self.__WIKIPEDIA_READER_LOADER)\n', (3046, 3078), False, 'from llama_index import SimpleDirectoryReader, download_loader\n'), ((3591, 3633), 'glob.glob', 'glob.glob', (['f"""{self.data_dir}/*.{file_ext}"""'], {}), "(f'{self.data_dir}/*.{file_ext}')\n", (3600, 3633), False, 'import glob\n'), ((1809, 1910), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': 'self.data_dir', 'required_exts': 'self.__SIMPLE_SUPPORTED_EXTENSIONS'}), '(input_dir=self.data_dir, required_exts=self.\n __SIMPLE_SUPPORTED_EXTENSIONS)\n', (1830, 1910), False, 'from llama_index import SimpleDirectoryReader, download_loader\n'), ((2525, 2540), 'pathlib.Path', 'Path', (['json_file'], {}), '(json_file)\n', (2529, 2540), False, 'from pathlib import Path\n')]
from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, set_global_service_context, ServiceContext from llama_index.llms import LlamaCPP llm = LlamaCPP(model_path="./models/llama-2-13b-chat.Q4_0.gguf") llm_predictor = LLMPredictor(llm=llm) service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor) set_global_service_context(service_context=service_context) documents = SimpleDirectoryReader('data').load_data() index = VectorStoreIndex.from_documents(documents, show_progress=True) query_engine = index.as_query_engine() response = query_engine.query("¿Qué tal es la carrera de Administración de Empresas?") print(response) response = query_engine.query("¿Qué tal es la carrera de Comercio Exterior?") print(response) index.storage_context.persist("./storage")
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.LLMPredictor", "llama_index.ServiceContext.from_defaults", "llama_index.SimpleDirectoryReader", "llama_index.llms.LlamaCPP", "llama_index.set_global_service_context" ]
[((167, 225), 'llama_index.llms.LlamaCPP', 'LlamaCPP', ([], {'model_path': '"""./models/llama-2-13b-chat.Q4_0.gguf"""'}), "(model_path='./models/llama-2-13b-chat.Q4_0.gguf')\n", (175, 225), False, 'from llama_index.llms import LlamaCPP\n'), ((242, 263), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (254, 263), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, set_global_service_context, ServiceContext\n'), ((283, 340), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (311, 340), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, set_global_service_context, ServiceContext\n'), ((342, 401), 'llama_index.set_global_service_context', 'set_global_service_context', ([], {'service_context': 'service_context'}), '(service_context=service_context)\n', (368, 401), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, set_global_service_context, ServiceContext\n'), ((466, 528), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'show_progress': '(True)'}), '(documents, show_progress=True)\n', (497, 528), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, set_global_service_context, ServiceContext\n'), ((415, 444), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (436, 444), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, set_global_service_context, ServiceContext\n')]
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import openai import yaml from langchain.chat_models import ChatOpenAI from langchain.text_splitter import SpacyTextSplitter from llama_index import GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader from llama_index.node_parser import SimpleNodeParser ''' Summarize an article with llama_index. In this example the text is split into chunks and the summary is built up a tree structure level by level. Uses the spaCy tokenizer model: python -m spacy download zh_core_web_sm ''' def get_api_key(): with open("config.yaml", "r", encoding="utf-8") as yaml_file: yaml_data = yaml.safe_load(yaml_file) openai.api_key = yaml_data["openai"]["api_key"] if __name__ == '__main__': get_api_key() # Configure the LLM llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", max_tokens=1024)) service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor) # spaCy tokenizer-based splitter, at most 2048 tokens per chunk text_splitter = SpacyTextSplitter(pipeline="zh_core_web_sm", chunk_size = 2048) parser = SimpleNodeParser(text_splitter=text_splitter) # Load the corpus from the directory documents = SimpleDirectoryReader('./data/mr_fujino').load_data() # Get the nodes from the corpus nodes = parser.get_nodes_from_documents(documents) # GPTListIndex, the simplest index structure list_index = GPTListIndex(nodes=nodes, service_context=service_context) # Summarize with the tree mode response = list_index.query("下面鲁迅先生以第一人称‘我’写的内容，请你用中文总结一下：", response_mode="tree_summarize") print(response)
[ "llama_index.node_parser.SimpleNodeParser", "llama_index.ServiceContext.from_defaults", "llama_index.GPTListIndex", "llama_index.SimpleDirectoryReader" ]
[((803, 860), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (831, 860), False, 'from llama_index import GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader\n'), ((906, 967), 'langchain.text_splitter.SpacyTextSplitter', 'SpacyTextSplitter', ([], {'pipeline': '"""zh_core_web_sm"""', 'chunk_size': '(2048)'}), "(pipeline='zh_core_web_sm', chunk_size=2048)\n", (923, 967), False, 'from langchain.text_splitter import SpacyTextSplitter\n'), ((983, 1028), 'llama_index.node_parser.SimpleNodeParser', 'SimpleNodeParser', ([], {'text_splitter': 'text_splitter'}), '(text_splitter=text_splitter)\n', (999, 1028), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((1234, 1292), 'llama_index.GPTListIndex', 'GPTListIndex', ([], {'nodes': 'nodes', 'service_context': 'service_context'}), '(nodes=nodes, service_context=service_context)\n', (1246, 1292), False, 'from llama_index import GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader\n'), ((530, 555), 'yaml.safe_load', 'yaml.safe_load', (['yaml_file'], {}), '(yaml_file)\n', (544, 555), False, 'import yaml\n'), ((709, 779), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'max_tokens': '(1024)'}), "(temperature=0, model_name='gpt-3.5-turbo', max_tokens=1024)\n", (719, 779), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1061, 1102), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./data/mr_fujino"""'], {}), "('./data/mr_fujino')\n", (1082, 1102), False, 'from llama_index import GPTListIndex, LLMPredictor, ServiceContext, SimpleDirectoryReader\n')]
from typing import Any, Dict, List, Optional, Sequence, Tuple from llama_index.core.base.response.schema import RESPONSE_TYPE, Response from llama_index.core.callbacks.base import CallbackManager from llama_index.core.callbacks.schema import CBEventType, EventPayload from llama_index.core.indices.multi_modal import MultiModalVectorIndexRetriever from llama_index.core.indices.query.base import BaseQueryEngine from llama_index.core.indices.query.schema import QueryBundle, QueryType from llama_index.core.multi_modal_llms.base import MultiModalLLM from llama_index.core.postprocessor.types import BaseNodePostprocessor from llama_index.core.prompts import BasePromptTemplate from llama_index.core.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT from llama_index.core.query_engine.citation_query_engine import CITATION_QA_TEMPLATE from llama_index.core.prompts import PromptTemplate from llama_index.core.prompts.mixin import PromptMixinType from llama_index.core.schema import ImageNode, NodeWithScore from mm_retriever import MultiModalQdrantRetriever # rewrite CITATION_QA_TEMPLATE TEXT_QA_TEMPLATE = PromptTemplate( "Please provide an answer based solely on the provided sources. " "When referencing information from a source, " "cite the appropriate source(s) using their corresponding numbers. " "Every answer should include at least one source citation. " "Only cite a source when you are explicitly referencing it. " "If none of the sources are helpful, you should indicate that. " "Below are several numbered sources of information:" "\n------\n" "{context_str}" "\n------\n" "Query: {query_str}\n" "Answer: " ) IMAGE_QA_TEMPLATE = PromptTemplate( "<image>\n" "Caption: {context_str}" "\n------\n" "You are a smart agent who can answer questions based on external information. " "Above is an annotated image you retrieved. Please provide an answer to the query based solely on the image and caption. " "If the image is not helpful, you should indicate that. \n" "Query: {query_str}\n" "Note: Don't include expressions like \"This image appears to be XXX\" in your answer.\n" "Answer: " ) ANSWER_INTEGRATION_TEMPLATE = PromptTemplate( "With the following sources related to your question from my knowledge base: \n" "\n"+"-"*50+"\n" "Paragraphs:\n\n" "{context_str}\n" "\nImages:\n" "{image_context_str}\n" "\n"+"-"*50+"\n" "Here is my answer:\n" "\n{text_context_response}\n{image_context_response}" ) # def _get_image_and_text_nodes( # nodes: List[NodeWithScore], # ) -> Tuple[List[NodeWithScore], List[NodeWithScore]]: # image_nodes = [] # text_nodes = [] # for res_node in nodes: # if isinstance(res_node.node, ImageNode): # image_nodes.append(res_node) # else: # text_nodes.append(res_node) # return image_nodes, text_nodes class CustomMultiModalQueryEngine(BaseQueryEngine): """Simple Multi Modal Retriever query engine. Assumes that retrieved text context fits within context window of LLM, along with images. Args: retriever (MultiModalVectorIndexRetriever): A retriever object. multi_modal_llm (Optional[MultiModalLLM]): MultiModalLLM Models. text_qa_template (Optional[BasePromptTemplate]): Text QA Prompt Template. image_qa_template (Optional[BasePromptTemplate]): Image QA Prompt Template. node_postprocessors (Optional[List[BaseNodePostprocessor]]): Node Postprocessors. callback_manager (Optional[CallbackManager]): A callback manager. 
""" def __init__( self, retriever: MultiModalQdrantRetriever, multi_modal_llm: MultiModalLLM, text_qa_template: Optional[BasePromptTemplate] = None, image_qa_template: Optional[BasePromptTemplate] = None, answer_integration_template: Optional[BasePromptTemplate] = None, node_postprocessors: Optional[List[BaseNodePostprocessor]] = None, callback_manager: Optional[CallbackManager] = None, **kwargs: Any, ) -> None: self._retriever = retriever self._multi_modal_llm = multi_modal_llm self._text_qa_template = text_qa_template or CITATION_QA_TEMPLATE self._image_qa_template = image_qa_template or IMAGE_QA_TEMPLATE self._answer_integration_template = answer_integration_template or ANSWER_INTEGRATION_TEMPLATE self._node_postprocessors = node_postprocessors or [] callback_manager = callback_manager or CallbackManager([]) for node_postprocessor in self._node_postprocessors: node_postprocessor.callback_manager = callback_manager super().__init__(callback_manager) def _get_prompts(self) -> Dict[str, Any]: """Get prompts.""" return {"text_qa_template": self._text_qa_template} def _get_prompt_modules(self) -> PromptMixinType: """Get prompt sub-modules.""" return {} def _apply_node_postprocessors( self, nodes: List[NodeWithScore], query_bundle: QueryBundle ) -> List[NodeWithScore]: for node_postprocessor in self._node_postprocessors: nodes = node_postprocessor.postprocess_nodes( nodes, query_bundle=query_bundle ) return nodes def retrieve(self, query_bundle: QueryBundle, text_query_mode: str = "hybrid", image_query_mode: str = "default", metadata_filters = None) -> Dict[str, List[NodeWithScore]]: text_retrieval_result = self._retriever.retrieve_text_nodes(query_bundle, text_query_mode, metadata_filters) image_retrieval_result = self._retriever.retrieve_image_nodes(query_bundle, image_query_mode, metadata_filters) reranked_text_nodes = self._retriever.rerank_text_nodes(query_bundle, text_retrieval_result) reranked_image_nodes = self._retriever.rerank_image_nodes(query_bundle, image_retrieval_result) retrieval_results = { "text_nodes": self._apply_node_postprocessors(reranked_text_nodes, query_bundle=query_bundle), "image_nodes": self._apply_node_postprocessors(reranked_image_nodes, query_bundle=query_bundle), } return retrieval_results # async def aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]: # nodes = await self._retriever.aretrieve(query_bundle) # return self._apply_node_postprocessors(nodes, query_bundle=query_bundle) def synthesize( self, query_bundle: QueryBundle, #nodes: List[NodeWithScore], retrieval_results: Dict[str, List[NodeWithScore]], additional_source_nodes: Optional[Sequence[NodeWithScore]] = None, ) -> RESPONSE_TYPE: #image_nodes, text_nodes = _get_image_and_text_nodes(nodes) image_nodes, text_nodes = retrieval_results["image_nodes"], retrieval_results["text_nodes"] #TODO: format prompt with (text context), (image + caption of image) context_str = "\n\n".join([f"Source {text_nodes.index(r)+1}:\n" + r.get_content() for r in text_nodes]) fmt_prompt = self._text_qa_template.format( context_str=context_str, query_str=query_bundle.query_str, ) image_context_str = "\n\n".join([r.get_content() for r in image_nodes]) image_query_fmt_prompt = self._image_qa_template.format(context_str=image_context_str, query_str=query_bundle.query_str) text_context_messages = [ { "role": "user", "content":[ {"type":"text", "text":fmt_prompt} ] } ] ## Generate response when the mllm(llava) is under llamacpp framework ##TODO: handle multiple image input image_url = 
f"data:image/png;base64,{image_nodes[0].node.image.decode('utf-8')}" image_context_messages = [ { "role": "user", "content": [ {"type": "image_url", "image_url": {"url": image_url}}, {"type": "text", "text": image_query_fmt_prompt} ] } ] text_context_response = self._multi_modal_llm.chat( messages=text_context_messages, ) image_context_response = self._multi_modal_llm.chat( messages=image_context_messages, ) ## Generate response when the mllm(llava) is under ollama framework # text_context_response = self._multi_modal_llm.complete( # prompt=fmt_prompt, # images=[], # ) # image_context_response = self._multi_modal_llm.complete( # prompt=image_query_fmt_prompt, # images=[image_node.node.image for image_node in image_nodes], # ) #TODO: transform encoded base64 image to image object in GUI synthesized_response = self._answer_integration_template.format( context_str=context_str, image_context_str= "\n\n".join(["<image>"+ str(r.node.image) + '</image>\n' + r.node.get_content() for r in image_nodes]), text_context_response=text_context_response.text.replace("\n"," ").strip(), image_context_response=i_q_response.text.replace("\n"," ").strip(), ) return Response( response=str(synthesized_response), source_nodes=text_nodes+image_nodes, metadata={ "query_str": query_bundle.query_str, "model_config": self._multi_modal_llm.metadata, }, ) # async def asynthesize( # self, # query_bundle: QueryBundle, # nodes: List[NodeWithScore], # additional_source_nodes: Optional[Sequence[NodeWithScore]] = None, # ) -> RESPONSE_TYPE: # image_nodes, text_nodes = _get_image_and_text_nodes(nodes) # context_str = "\n\n".join([r.get_content() for r in text_nodes]) # fmt_prompt = self._text_qa_template.format( # context_str=context_str, query_str=query_bundle.query_str # ) # llm_response = await self._multi_modal_llm.acomplete( # prompt=fmt_prompt, # image_documents=image_nodes, # ) # return Response( # response=str(llm_response), # source_nodes=nodes, # metadata={"text_nodes": text_nodes, "image_nodes": image_nodes}, # ) async def asynthesize( self, query_bundle: QueryBundle, nodes: List[NodeWithScore], additional_source_nodes: Optional[Sequence[NodeWithScore]] = None, ) -> RESPONSE_TYPE: raise NotImplementedError("Async synthesize not implemented yet") def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: """Answer a query.""" with self.callback_manager.event( CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str} ) as query_event: with self.callback_manager.event( CBEventType.RETRIEVE, payload={EventPayload.QUERY_STR: query_bundle.query_str}, ) as retrieve_event: retrieval_results = self.retrieve(query_bundle) retrieve_event.on_end( payload={EventPayload.NODES: retrieval_results}, ) response = self.synthesize( query_bundle, retrieval_results=retrieval_results, ) query_event.on_end(payload={EventPayload.RESPONSE: response}) return response async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: raise NotImplementedError("Async query not implemented yet") # async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: # """Answer a query.""" # with self.callback_manager.event( # CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str} # ) as query_event: # with self.callback_manager.event( # CBEventType.RETRIEVE, # payload={EventPayload.QUERY_STR: query_bundle.query_str}, # ) as retrieve_event: # nodes = await self.aretrieve(query_bundle) # retrieve_event.on_end( # payload={EventPayload.NODES: nodes}, # ) # response = await self.asynthesize( # query_bundle, 
# nodes=nodes, # ) # query_event.on_end(payload={EventPayload.RESPONSE: response}) # return response @property def retriever(self) -> MultiModalVectorIndexRetriever: """Get the retriever object.""" return self._retriever
[ "llama_index.core.prompts.PromptTemplate", "llama_index.core.callbacks.base.CallbackManager" ]
[((1114, 1604), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""Please provide an answer based solely on the provided sources. When referencing information from a source, cite the appropriate source(s) using their corresponding numbers. Every answer should include at least one source citation. Only cite a source when you are explicitly referencing it. If none of the sources are helpful, you should indicate that. Below are several numbered sources of information:\n------\n{context_str}\n------\nQuery: {query_str}\nAnswer: """'], {}), '(\n """Please provide an answer based solely on the provided sources. When referencing information from a source, cite the appropriate source(s) using their corresponding numbers. Every answer should include at least one source citation. Only cite a source when you are explicitly referencing it. If none of the sources are helpful, you should indicate that. Below are several numbered sources of information:\n------\n{context_str}\n------\nQuery: {query_str}\nAnswer: """\n )\n', (1128, 1604), False, 'from llama_index.core.prompts import PromptTemplate\n'), ((1700, 2135), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""<image>\nCaption: {context_str}\n------\nYou are a smart agent who can answer questions based on external information. Above is an annotated image you retrieved. Please provide an answer to the query based solely on the image and caption. If the image is not helpful, you should indicate that. \nQuery: {query_str}\nNote: Don\'t include expressions like "This image appears to be XXX" in your answer.\nAnswer: """'], {}), '(\n """<image>\nCaption: {context_str}\n------\nYou are a smart agent who can answer questions based on external information. Above is an annotated image you retrieved. Please provide an answer to the query based solely on the image and caption. If the image is not helpful, you should indicate that. \nQuery: {query_str}\nNote: Don\'t include expressions like "This image appears to be XXX" in your answer.\nAnswer: """\n )\n', (1714, 2135), False, 'from llama_index.core.prompts import PromptTemplate\n'), ((2223, 2517), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['(\n \'With the following sources related to your question from my knowledge base: \\n\\n\'\n + \'-\' * 50 +\n """\nParagraphs:\n\n{context_str}\n\nImages:\n{image_context_str}\n\n""" + \'-\' *\n 50 +\n """\nHere is my answer:\n\n{text_context_response}\n{image_context_response}"""\n )'], {}), '(\n """With the following sources related to your question from my knowledge base: \n\n"""\n + \'-\' * 50 +\n """\nParagraphs:\n\n{context_str}\n\nImages:\n{image_context_str}\n\n""" + \'-\' *\n 50 +\n """\nHere is my answer:\n\n{text_context_response}\n{image_context_response}"""\n )\n', (2237, 2517), False, 'from llama_index.core.prompts import PromptTemplate\n'), ((4563, 4582), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (4578, 4582), False, 'from llama_index.core.callbacks.base import CallbackManager\n')]
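A hedged usage sketch for the CustomMultiModalQueryEngine record above. The retriever (a MultiModalQdrantRetriever) and the multimodal LLM come from project-specific modules whose constructors are not shown, so they are passed in here as opaque arguments rather than built.

# Hypothetical driver; `retriever` and `mm_llm` must be constructed elsewhere --
# their arguments are not part of the source above.
from llama_index.core.indices.query.schema import QueryBundle

def answer(retriever, mm_llm, question: str):
    engine = CustomMultiModalQueryEngine(retriever=retriever, multi_modal_llm=mm_llm)
    response = engine.query(QueryBundle(query_str=question))
    print(response.response)               # synthesized text + image answer
    print(response.metadata["query_str"])  # original query echoed back in metadata
    return response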
import asyncio import os import tempfile import traceback from datetime import datetime, date from functools import partial from pathlib import Path import discord import aiohttp import openai import tiktoken from langchain.chat_models import ChatOpenAI from llama_index import ( QuestionAnswerPrompt, GPTVectorStoreIndex, BeautifulSoupWebReader, Document, LLMPredictor, OpenAIEmbedding, SimpleDirectoryReader, MockEmbedding, ServiceContext, get_response_synthesizer, ) from llama_index.callbacks import CallbackManager, TokenCountingHandler from llama_index.composability import QASummaryQueryEngineBuilder from llama_index.retrievers import VectorIndexRetriever from llama_index.query_engine import RetrieverQueryEngine, MultiStepQueryEngine from llama_index.indices.query.query_transform import StepDecomposeQueryTransform from llama_index.prompts.chat_prompts import CHAT_REFINE_PROMPT from llama_index.readers.web import DEFAULT_WEBSITE_EXTRACTOR from langchain import OpenAI from models.openai_model import Models from services.environment_service import EnvService MAX_SEARCH_PRICE = EnvService.get_max_search_price() class Search: def __init__(self, gpt_model, usage_service): self.model = gpt_model self.usage_service = usage_service self.google_search_api_key = EnvService.get_google_search_api_key() self.google_search_engine_id = EnvService.get_google_search_engine_id() self.loop = asyncio.get_running_loop() self.qaprompt = QuestionAnswerPrompt( "You are formulating the response to a search query given the search prompt and the context. Context information is below. The text '<|endofstatement|>' is used to separate chat entries and make it easier for you to understand the context\n" "---------------------\n" "{context_str}" "\n---------------------\n" "Never say '<|endofstatement|>'\n" "Given the context information and not prior knowledge, " "answer the question, say that you were unable to answer the question if there is not sufficient context to formulate a decisive answer. If the prior knowledge/context was sufficient, simply repeat it. 
The search query was: {query_str}\n" ) self.openai_key = os.getenv("OPENAI_TOKEN") self.EMBED_CUTOFF = 2000 def add_search_index(self, index, user_id, query): # Create a folder called "indexes/{USER_ID}" if it doesn't exist already Path(f"{EnvService.save_path()}/indexes/{user_id}_search").mkdir( parents=True, exist_ok=True ) # Save the index to file under the user id file = f"{date.today().month}_{date.today().day}_{query[:20]}" index.storage_context.persist( persist_dir=EnvService.save_path() / "indexes" / f"{str(user_id)}_search" / f"{file}" ) def build_search_started_embed(self): embed = discord.Embed( title="Searching the web...", description="Refining google search query...", color=discord.Color.blurple(), ) embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png") return embed def build_search_refined_embed(self, refined_query): embed = discord.Embed( title="Searching the web...", description="Refined query:\n" + f"`{refined_query}`" + "\nRetrieving links from google...", color=discord.Color.blurple(), ) embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png") return embed def build_search_links_retrieved_embed(self, refined_query): embed = discord.Embed( title="Searching the web...", description="Refined query:\n" + f"`{refined_query}`" "\nRetrieving webpages...", color=discord.Color.blurple(), ) embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png") return embed def build_search_determining_price_embed(self, refined_query): embed = discord.Embed( title="Searching the web...", description="Refined query:\n" + f"`{refined_query}`" "\nPre-determining index price...", color=discord.Color.blurple(), ) embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png") return embed def build_search_webpages_retrieved_embed(self, refined_query): embed = discord.Embed( title="Searching the web...", description="Refined query:\n" + f"`{refined_query}`" "\nIndexing...", color=discord.Color.blurple(), ) embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png") return embed def build_search_indexed_embed(self, refined_query): embed = discord.Embed( title="Searching the web...", description="Refined query:\n" + f"`{refined_query}`" "\nThinking about your question...", color=discord.Color.blurple(), ) embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png") return embed def build_search_final_embed(self, refined_query, price): embed = discord.Embed( title="Searching the web...", description="Refined query:\n" + f"`{refined_query}`" "\nDone!\n||The total price was $" + price + "||", color=discord.Color.blurple(), ) embed.set_thumbnail(url="https://i.imgur.com/txHhNzL.png") return embed def index_webpage(self, url) -> list[Document]: documents = BeautifulSoupWebReader( website_extractor=DEFAULT_WEBSITE_EXTRACTOR ).load_data(urls=[url]) return documents async def index_pdf(self, url) -> list[Document]: # Download the PDF at the url and save it to a tempfile async with aiohttp.ClientSession() as session: async with session.get(url) as response: if response.status == 200: data = await response.read() f = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False) f.write(data) f.close() else: raise ValueError("Could not download PDF") # Get the file path of this tempfile.NamedTemporaryFile # Save this temp file to an actual file that we can put into something else to read it documents = SimpleDirectoryReader(input_files=[f.name]).load_data() for document in documents: document.extra_info = {"URL": url} # Delete the temporary file return documents async def get_links(self, query, search_scope=2): 
"""Search the web for a query""" async with aiohttp.ClientSession() as session: async with session.get( f"https://www.googleapis.com/customsearch/v1?key={self.google_search_api_key}&cx={self.google_search_engine_id}&q={query}" ) as response: if response.status == 200: data = await response.json() # Return a list of the top 2 links return ( [item["link"] for item in data["items"][:search_scope]], [item["link"] for item in data["items"]], ) else: raise ValueError( "Error while retrieving links, the response returned " + str(response.status) + " with the message " + str(await response.text()) ) async def try_edit(self, message, embed): try: await message.edit(embed=embed) except Exception: traceback.print_exc() pass async def try_delete(self, message): try: await message.delete() except Exception: traceback.print_exc() pass async def search( self, ctx: discord.ApplicationContext, query, user_api_key, search_scope, nodes, deep, response_mode, model, multistep=False, redo=None, ): DEFAULT_SEARCH_NODES = 1 if not user_api_key: os.environ["OPENAI_API_KEY"] = self.openai_key else: os.environ["OPENAI_API_KEY"] = user_api_key openai.api_key = os.environ["OPENAI_API_KEY"] # Initialize the search cost price = 0 if ctx: in_progress_message = ( await ctx.respond(embed=self.build_search_started_embed()) if not redo else await ctx.channel.send(embed=self.build_search_started_embed()) ) try: llm_predictor_presearch = OpenAI( max_tokens=50, temperature=0.4, presence_penalty=0.65, model_name="text-davinci-003", ) # Refine a query to send to google custom search API prompt = f"You are to be given a search query for google. Change the query such that putting it into the Google Custom Search API will return the most relevant websites to assist in answering the original query. If the original query is inferring knowledge about the current day, insert the current day into the refined prompt. If the original query is inferring knowledge about the current month, insert the current month and year into the refined prompt. If the original query is inferring knowledge about the current year, insert the current year into the refined prompt. Generally, if the original query is inferring knowledge about something that happened recently, insert the current month into the refined query. Avoid inserting a day, month, or year for queries that purely ask about facts and about things that don't have much time-relevance. The current date is {str(datetime.now().date())}. Do not insert the current date if not neccessary. Respond with only the refined query for the original query. 
Don’t use punctuation or quotation marks.\n\nExamples:\n---\nOriginal Query: ‘Who is Harald Baldr?’\nRefined Query: ‘Harald Baldr biography’\n---\nOriginal Query: ‘What happened today with the Ohio train derailment?’\nRefined Query: ‘Ohio train derailment details {str(datetime.now().date())}’\n---\nOriginal Query: ‘Is copper in drinking water bad for you?’\nRefined Query: ‘copper in drinking water adverse effects’\n---\nOriginal Query: What's the current time in Mississauga?\nRefined Query: current time Mississauga\nNow, refine the user input query.\nOriginal Query: {query}\nRefined Query:" query_refined = await llm_predictor_presearch.agenerate( prompts=[prompt], ) query_refined_text = query_refined.generations[0][0].text await self.usage_service.update_usage( query_refined.llm_output.get("token_usage").get("total_tokens"), "davinci", ) price += await self.usage_service.get_price( query_refined.llm_output.get("token_usage").get("total_tokens"), "davinci", ) except Exception as e: traceback.print_exc() query_refined_text = query if ctx: await self.try_edit( in_progress_message, self.build_search_refined_embed(query_refined_text) ) # Get the links for the query links, all_links = await self.get_links( query_refined_text, search_scope=search_scope ) if ctx: await self.try_edit( in_progress_message, self.build_search_links_retrieved_embed(query_refined_text), ) if all_links is None: raise ValueError("The Google Search API returned an error.") # For each link, crawl the page and get all the text that's not HTML garbage. # Concatenate all the text for a given website into one string and save it into an array: documents = [] for link in links: # First, attempt a connection with a timeout of 3 seconds to the link, if the timeout occurs, don't # continue to the document loading. pdf = False try: async with aiohttp.ClientSession() as session: async with session.get(link, timeout=1) as response: # Add another entry to links from all_links if the link is not already in it to compensate for the failed request if response.status not in [200, 203, 202, 204]: for link2 in all_links: if link2 not in links: links.append(link2) break continue # Follow redirects elif response.status in [301, 302, 303, 307, 308]: try: links.append(response.url) continue except: continue else: # Detect if the link is a PDF, if it is, we load it differently if response.headers["Content-Type"] == "application/pdf": pdf = True except: try: # Try to add a link from all_links, this is kind of messy. 
for link2 in all_links: if link2 not in links: links.append(link2) break except: pass continue try: if not pdf: document = await self.loop.run_in_executor( None, partial(self.index_webpage, link) ) else: document = await self.index_pdf(link) [documents.append(doc) for doc in document] except Exception as e: traceback.print_exc() if ctx: await self.try_edit( in_progress_message, self.build_search_webpages_retrieved_embed(query_refined_text), ) embedding_model = OpenAIEmbedding() llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name=model)) token_counter = TokenCountingHandler( tokenizer=tiktoken.encoding_for_model(model).encode, verbose=False ) callback_manager = CallbackManager([token_counter]) service_context = ServiceContext.from_defaults( llm_predictor=llm_predictor, embed_model=embedding_model, callback_manager=callback_manager, ) # Check price token_counter_mock = TokenCountingHandler( tokenizer=tiktoken.encoding_for_model(model).encode, verbose=False ) callback_manager_mock = CallbackManager([token_counter_mock]) embed_model_mock = MockEmbedding(embed_dim=1536) service_context_mock = ServiceContext.from_defaults( embed_model=embed_model_mock, callback_manager=callback_manager_mock ) self.loop.run_in_executor( None, partial( GPTVectorStoreIndex.from_documents, documents, service_context=service_context_mock, ), ) total_usage_price = await self.usage_service.get_price( token_counter_mock.total_embedding_token_count, "embedding" ) if total_usage_price > 1.00: raise ValueError( "Doing this search would be prohibitively expensive. Please try a narrower search scope." ) if not deep: index = await self.loop.run_in_executor( None, partial( GPTVectorStoreIndex.from_documents, documents, service_context=service_context, use_async=True, ), ) # save the index to disk if not a redo if not redo: self.add_search_index( index, ctx.user.id if isinstance(ctx, discord.ApplicationContext) else ctx.author.id, query, ) else: if ctx: await self.try_edit( in_progress_message, self.build_search_determining_price_embed(query_refined_text), ) graph_builder = QASummaryQueryEngineBuilder(service_context=service_context) index = await self.loop.run_in_executor( None, partial( graph_builder.build_from_documents, documents, ), ) if ctx: await self.try_edit( in_progress_message, self.build_search_indexed_embed(query_refined_text) ) ######################################## if not deep: step_decompose_transform = StepDecomposeQueryTransform( service_context.llm_predictor ) retriever = VectorIndexRetriever( index=index, similarity_top_k=nodes or DEFAULT_SEARCH_NODES, ) response_synthesizer = get_response_synthesizer( response_mode=response_mode, use_async=True, refine_template=CHAT_REFINE_PROMPT, text_qa_template=self.qaprompt, service_context=service_context, ) query_engine = RetrieverQueryEngine( retriever=retriever, response_synthesizer=response_synthesizer ) multistep_query_engine = MultiStepQueryEngine( query_engine=query_engine, query_transform=step_decompose_transform, index_summary="Provides information about everything you need to know about this topic, use this to answer the question.", ) if multistep: response = await self.loop.run_in_executor( None, partial(multistep_query_engine.query, query), ) else: response = await self.loop.run_in_executor( None, partial(query_engine.query, query), ) else: query_configs = [ { "index_struct_type": "simple_dict", "query_mode": "default", "query_kwargs": {"similarity_top_k": 1}, }, { "index_struct_type": "list", "query_mode": "default", "query_kwargs": { 
"response_mode": "tree_summarize", "use_async": True, "verbose": True, }, }, { "index_struct_type": "tree", "query_mode": "default", "query_kwargs": { "verbose": True, "use_async": True, "child_branch_factor": 2, }, }, ] response = await self.loop.run_in_executor( None, partial( index.query, query, ), ) await self.usage_service.update_usage( token_counter.total_llm_token_count, await self.usage_service.get_cost_name(model), ) await self.usage_service.update_usage( token_counter.total_embedding_token_count, "embedding" ) price += await self.usage_service.get_price( token_counter.total_llm_token_count, await self.usage_service.get_cost_name(model), ) + await self.usage_service.get_price( token_counter.total_embedding_token_count, "embedding" ) if ctx: await self.try_edit( in_progress_message, self.build_search_final_embed(query_refined_text, str(round(price, 6))), ) return response, query_refined_text
[ "llama_index.get_response_synthesizer", "llama_index.indices.query.query_transform.StepDecomposeQueryTransform", "llama_index.OpenAIEmbedding", "llama_index.composability.QASummaryQueryEngineBuilder", "llama_index.MockEmbedding", "llama_index.QuestionAnswerPrompt", "llama_index.ServiceContext.from_defaults", "llama_index.query_engine.RetrieverQueryEngine", "llama_index.BeautifulSoupWebReader", "llama_index.SimpleDirectoryReader", "llama_index.retrievers.VectorIndexRetriever", "llama_index.callbacks.CallbackManager", "llama_index.query_engine.MultiStepQueryEngine" ]
[((1135, 1168), 'services.environment_service.EnvService.get_max_search_price', 'EnvService.get_max_search_price', ([], {}), '()\n', (1166, 1168), False, 'from services.environment_service import EnvService\n'), ((1346, 1384), 'services.environment_service.EnvService.get_google_search_api_key', 'EnvService.get_google_search_api_key', ([], {}), '()\n', (1382, 1384), False, 'from services.environment_service import EnvService\n'), ((1424, 1464), 'services.environment_service.EnvService.get_google_search_engine_id', 'EnvService.get_google_search_engine_id', ([], {}), '()\n', (1462, 1464), False, 'from services.environment_service import EnvService\n'), ((1485, 1511), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (1509, 1511), False, 'import asyncio\n'), ((1536, 2191), 'llama_index.QuestionAnswerPrompt', 'QuestionAnswerPrompt', (['"""You are formulating the response to a search query given the search prompt and the context. Context information is below. The text \'<|endofstatement|>\' is used to separate chat entries and make it easier for you to understand the context\n---------------------\n{context_str}\n---------------------\nNever say \'<|endofstatement|>\'\nGiven the context information and not prior knowledge, answer the question, say that you were unable to answer the question if there is not sufficient context to formulate a decisive answer. If the prior knowledge/context was sufficient, simply repeat it. The search query was: {query_str}\n"""'], {}), '(\n """You are formulating the response to a search query given the search prompt and the context. Context information is below. The text \'<|endofstatement|>\' is used to separate chat entries and make it easier for you to understand the context\n---------------------\n{context_str}\n---------------------\nNever say \'<|endofstatement|>\'\nGiven the context information and not prior knowledge, answer the question, say that you were unable to answer the question if there is not sufficient context to formulate a decisive answer. If the prior knowledge/context was sufficient, simply repeat it. 
The search query was: {query_str}\n"""\n )\n', (1556, 2191), False, 'from llama_index import QuestionAnswerPrompt, GPTVectorStoreIndex, BeautifulSoupWebReader, Document, LLMPredictor, OpenAIEmbedding, SimpleDirectoryReader, MockEmbedding, ServiceContext, get_response_synthesizer\n'), ((2322, 2347), 'os.getenv', 'os.getenv', (['"""OPENAI_TOKEN"""'], {}), "('OPENAI_TOKEN')\n", (2331, 2347), False, 'import os\n'), ((14651, 14668), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (14666, 14668), False, 'from llama_index import QuestionAnswerPrompt, GPTVectorStoreIndex, BeautifulSoupWebReader, Document, LLMPredictor, OpenAIEmbedding, SimpleDirectoryReader, MockEmbedding, ServiceContext, get_response_synthesizer\n'), ((14920, 14952), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[token_counter]'], {}), '([token_counter])\n', (14935, 14952), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler\n'), ((14980, 15106), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embedding_model', 'callback_manager': 'callback_manager'}), '(llm_predictor=llm_predictor, embed_model=\n embedding_model, callback_manager=callback_manager)\n', (15008, 15106), False, 'from llama_index import QuestionAnswerPrompt, GPTVectorStoreIndex, BeautifulSoupWebReader, Document, LLMPredictor, OpenAIEmbedding, SimpleDirectoryReader, MockEmbedding, ServiceContext, get_response_synthesizer\n'), ((15344, 15381), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[token_counter_mock]'], {}), '([token_counter_mock])\n', (15359, 15381), False, 'from llama_index.callbacks import CallbackManager, TokenCountingHandler\n'), ((15409, 15438), 'llama_index.MockEmbedding', 'MockEmbedding', ([], {'embed_dim': '(1536)'}), '(embed_dim=1536)\n', (15422, 15438), False, 'from llama_index import QuestionAnswerPrompt, GPTVectorStoreIndex, BeautifulSoupWebReader, Document, LLMPredictor, OpenAIEmbedding, SimpleDirectoryReader, MockEmbedding, ServiceContext, get_response_synthesizer\n'), ((15470, 15573), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model_mock', 'callback_manager': 'callback_manager_mock'}), '(embed_model=embed_model_mock, callback_manager\n =callback_manager_mock)\n', (15498, 15573), False, 'from llama_index import QuestionAnswerPrompt, GPTVectorStoreIndex, BeautifulSoupWebReader, Document, LLMPredictor, OpenAIEmbedding, SimpleDirectoryReader, MockEmbedding, ServiceContext, get_response_synthesizer\n'), ((5959, 5982), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (5980, 5982), False, 'import aiohttp\n'), ((6864, 6887), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (6885, 6887), False, 'import aiohttp\n'), ((8966, 9063), 'langchain.OpenAI', 'OpenAI', ([], {'max_tokens': '(50)', 'temperature': '(0.4)', 'presence_penalty': '(0.65)', 'model_name': '"""text-davinci-003"""'}), "(max_tokens=50, temperature=0.4, presence_penalty=0.65, model_name=\n 'text-davinci-003')\n", (8972, 9063), False, 'from langchain import OpenAI\n'), ((15656, 15753), 'functools.partial', 'partial', (['GPTVectorStoreIndex.from_documents', 'documents'], {'service_context': 'service_context_mock'}), '(GPTVectorStoreIndex.from_documents, documents, service_context=\n service_context_mock)\n', (15663, 15753), False, 'from functools import partial\n'), ((17056, 17116), 
'llama_index.composability.QASummaryQueryEngineBuilder', 'QASummaryQueryEngineBuilder', ([], {'service_context': 'service_context'}), '(service_context=service_context)\n', (17083, 17116), False, 'from llama_index.composability import QASummaryQueryEngineBuilder\n'), ((17602, 17660), 'llama_index.indices.query.query_transform.StepDecomposeQueryTransform', 'StepDecomposeQueryTransform', (['service_context.llm_predictor'], {}), '(service_context.llm_predictor)\n', (17629, 17660), False, 'from llama_index.indices.query.query_transform import StepDecomposeQueryTransform\n'), ((17716, 17801), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': '(nodes or DEFAULT_SEARCH_NODES)'}), '(index=index, similarity_top_k=nodes or\n DEFAULT_SEARCH_NODES)\n', (17736, 17801), False, 'from llama_index.retrievers import VectorIndexRetriever\n'), ((17881, 18059), 'llama_index.get_response_synthesizer', 'get_response_synthesizer', ([], {'response_mode': 'response_mode', 'use_async': '(True)', 'refine_template': 'CHAT_REFINE_PROMPT', 'text_qa_template': 'self.qaprompt', 'service_context': 'service_context'}), '(response_mode=response_mode, use_async=True,\n refine_template=CHAT_REFINE_PROMPT, text_qa_template=self.qaprompt,\n service_context=service_context)\n', (17905, 18059), False, 'from llama_index import QuestionAnswerPrompt, GPTVectorStoreIndex, BeautifulSoupWebReader, Document, LLMPredictor, OpenAIEmbedding, SimpleDirectoryReader, MockEmbedding, ServiceContext, get_response_synthesizer\n'), ((18175, 18264), 'llama_index.query_engine.RetrieverQueryEngine', 'RetrieverQueryEngine', ([], {'retriever': 'retriever', 'response_synthesizer': 'response_synthesizer'}), '(retriever=retriever, response_synthesizer=\n response_synthesizer)\n', (18195, 18264), False, 'from llama_index.query_engine import RetrieverQueryEngine, MultiStepQueryEngine\n'), ((18327, 18554), 'llama_index.query_engine.MultiStepQueryEngine', 'MultiStepQueryEngine', ([], {'query_engine': 'query_engine', 'query_transform': 'step_decompose_transform', 'index_summary': '"""Provides information about everything you need to know about this topic, use this to answer the question."""'}), "(query_engine=query_engine, query_transform=\n step_decompose_transform, index_summary=\n 'Provides information about everything you need to know about this topic, use this to answer the question.'\n )\n", (18347, 18554), False, 'from llama_index.query_engine import RetrieverQueryEngine, MultiStepQueryEngine\n'), ((3141, 3164), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (3162, 3164), False, 'import discord\n'), ((3542, 3565), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (3563, 3565), False, 'import discord\n'), ((3928, 3951), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (3949, 3951), False, 'import discord\n'), ((4325, 4348), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (4346, 4348), False, 'import discord\n'), ((4692, 4715), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (4713, 4715), False, 'import discord\n'), ((5080, 5103), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (5101, 5103), False, 'import discord\n'), ((5487, 5510), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (5508, 5510), False, 'import discord\n'), ((5684, 5751), 'llama_index.BeautifulSoupWebReader', 'BeautifulSoupWebReader', ([], {'website_extractor': 'DEFAULT_WEBSITE_EXTRACTOR'}), 
'(website_extractor=DEFAULT_WEBSITE_EXTRACTOR)\n', (5706, 5751), False, 'from llama_index import QuestionAnswerPrompt, GPTVectorStoreIndex, BeautifulSoupWebReader, Document, LLMPredictor, OpenAIEmbedding, SimpleDirectoryReader, MockEmbedding, ServiceContext, get_response_synthesizer\n'), ((6549, 6592), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[f.name]'}), '(input_files=[f.name])\n', (6570, 6592), False, 'from llama_index import QuestionAnswerPrompt, GPTVectorStoreIndex, BeautifulSoupWebReader, Document, LLMPredictor, OpenAIEmbedding, SimpleDirectoryReader, MockEmbedding, ServiceContext, get_response_synthesizer\n'), ((7897, 7918), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7916, 7918), False, 'import traceback\n'), ((8064, 8085), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (8083, 8085), False, 'import traceback\n'), ((11410, 11431), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (11429, 11431), False, 'import traceback\n'), ((14711, 14754), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': 'model'}), '(temperature=0, model_name=model)\n', (14721, 14754), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2711, 2723), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2721, 2723), False, 'from datetime import datetime, date\n'), ((2732, 2744), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2742, 2744), False, 'from datetime import datetime, date\n'), ((6164, 6220), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".pdf"""', 'delete': '(False)'}), "(suffix='.pdf', delete=False)\n", (6191, 6220), False, 'import tempfile\n'), ((12525, 12548), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (12546, 12548), False, 'import aiohttp\n'), ((14421, 14442), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (14440, 14442), False, 'import traceback\n'), ((14825, 14859), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['model'], {}), '(model)\n', (14852, 14859), False, 'import tiktoken\n'), ((15245, 15279), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['model'], {}), '(model)\n', (15272, 15279), False, 'import tiktoken\n'), ((16269, 16377), 'functools.partial', 'partial', (['GPTVectorStoreIndex.from_documents', 'documents'], {'service_context': 'service_context', 'use_async': '(True)'}), '(GPTVectorStoreIndex.from_documents, documents, service_context=\n service_context, use_async=True)\n', (16276, 16377), False, 'from functools import partial\n'), ((17209, 17263), 'functools.partial', 'partial', (['graph_builder.build_from_documents', 'documents'], {}), '(graph_builder.build_from_documents, documents)\n', (17216, 17263), False, 'from functools import partial\n'), ((19989, 20016), 'functools.partial', 'partial', (['index.query', 'query'], {}), '(index.query, query)\n', (19996, 20016), False, 'from functools import partial\n'), ((18735, 18779), 'functools.partial', 'partial', (['multistep_query_engine.query', 'query'], {}), '(multistep_query_engine.query, query)\n', (18742, 18779), False, 'from functools import partial\n'), ((18923, 18957), 'functools.partial', 'partial', (['query_engine.query', 'query'], {}), '(query_engine.query, query)\n', (18930, 18957), False, 'from functools import partial\n'), ((2534, 2556), 'services.environment_service.EnvService.save_path', 'EnvService.save_path', ([], {}), '()\n', (2554, 2556), False, 'from 
services.environment_service import EnvService\n'), ((2828, 2850), 'services.environment_service.EnvService.save_path', 'EnvService.save_path', ([], {}), '()\n', (2848, 2850), False, 'from services.environment_service import EnvService\n'), ((14174, 14207), 'functools.partial', 'partial', (['self.index_webpage', 'link'], {}), '(self.index_webpage, link)\n', (14181, 14207), False, 'from functools import partial\n'), ((10093, 10107), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10105, 10107), False, 'from datetime import datetime, date\n'), ((10513, 10527), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10525, 10527), False, 'from datetime import datetime, date\n')]
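A hedged sketch of driving the Search helper above outside of a Discord command; gpt_model and usage_service are the bot's own service objects and are treated here as given rather than constructed.

# Hypothetical standalone driver; ctx=None skips the Discord progress embeds.
import asyncio

async def run_search(gpt_model, usage_service, question: str):
    searcher = Search(gpt_model, usage_service)   # needs a running event loop
    response, refined_query = await searcher.search(
        ctx=None,
        query=question,
        user_api_key=None,          # falls back to the OPENAI_TOKEN environment variable
        search_scope=2,
        nodes=1,
        deep=False,
        response_mode="compact",
        model="gpt-3.5-turbo",
    )
    print(refined_query)
    print(str(response))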
from typing import List from llama_index import Document, StorageContext, VectorStoreIndex, load_index_from_storage import os def create_vector(service_context, vector_storage_dir: str, doc_loader: callable) -> List[Document]: if not os.path.exists(vector_storage_dir): documents = doc_loader() print(f"About to build vector-index over {len(documents)} document(s) ...") vector_index = VectorStoreIndex.from_documents( documents, service_context=service_context ) print(f"Storing vector-index to {vector_storage_dir} ...") vector_index.storage_context.persist(persist_dir=vector_storage_dir) else: print(f"Loading vector-index from storage from {vector_storage_dir} ...") storage_context_vector = StorageContext.from_defaults(persist_dir=vector_storage_dir) vector_index = load_index_from_storage( service_context=service_context, storage_context=storage_context_vector ) return vector_index
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.StorageContext.from_defaults", "llama_index.load_index_from_storage" ]
[((240, 274), 'os.path.exists', 'os.path.exists', (['vector_storage_dir'], {}), '(vector_storage_dir)\n', (254, 274), False, 'import os\n'), ((416, 491), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (447, 491), False, 'from llama_index import Document, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((795, 855), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'vector_storage_dir'}), '(persist_dir=vector_storage_dir)\n', (823, 855), False, 'from llama_index import Document, StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((879, 980), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'service_context': 'service_context', 'storage_context': 'storage_context_vector'}), '(service_context=service_context, storage_context=\n storage_context_vector)\n', (902, 980), False, 'from llama_index import Document, StorageContext, VectorStoreIndex, load_index_from_storage\n')]
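A hedged example of calling the create_vector helper above; the document directory, storage path, and default ServiceContext are illustrative assumptions, not part of the source.

# Illustrative caller; "./docs" and "./storage/vector" are assumed paths.
from llama_index import ServiceContext, SimpleDirectoryReader

def load_docs():
    return SimpleDirectoryReader("./docs").load_data()

service_context = ServiceContext.from_defaults()
vector_index = create_vector(service_context, "./storage/vector", load_docs)
query_engine = vector_index.as_query_engine()
print(query_engine.query("What do these documents cover?"))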
import streamlit as st import torch from glob import glob from pathlib import Path from llama_index.prompts.prompts import SimpleInputPrompt from llama_index import ( set_global_service_context, ServiceContext, VectorStoreIndex, download_loader, ) from langchain.embeddings import HuggingFaceEmbeddings from llama_index.embeddings import LangchainEmbedding from transformers import AutoTokenizer, AutoModelForCausalLM from llama_index.llms import HuggingFaceLLM PyMuPDFReader = download_loader("PyMuPDFReader") loader = PyMuPDFReader() model_name = "meta-llama/Llama-2-7b-chat-hf" auth_token = "*******************************" system_prompt = """<s>[INST] <<SYS>> You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. <</SYS>> """ # Llama2's official system prompt @st.cache_resource def model_tokenizer_embedder(model_name, auth_token): tokenizer = AutoTokenizer.from_pretrained( model_name, cache_dir="./model/", use_auth_token=auth_token ) model = AutoModelForCausalLM.from_pretrained( model_name, cache_dir="./model/", use_auth_token=auth_token, torch_dtype=torch.float16, load_in_8bit=True, ) embedding_llm = LangchainEmbedding( HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2") ) return tokenizer, model, embedding_llm def load_documents(directory): documents = [] for item_path in glob(directory + "*.pdf"): # documents.extend(loader.load(file_path=Path(item_path), metadata=True)) documents.extend(loader.load(file_path=item_path, metadata=True)) return documents tokenizer, model, embedding_llm = model_tokenizer_embedder(model_name, auth_token) query_wrapper_prompt = SimpleInputPrompt("{query_str} [/INST]") llm = HuggingFaceLLM( context_window=4096, max_new_tokens=256, system_prompt=system_prompt, query_wrapper_prompt=query_wrapper_prompt, model=model, tokenizer=tokenizer, ) service_context = ServiceContext.from_defaults( chunk_size=1024, llm=llm, embed_model=embedding_llm ) set_global_service_context(service_context) documents = load_documents("./documents/") index = VectorStoreIndex.from_documents(documents) query_engine = index.as_query_engine() st.title("Llama2 RAG") prompt = st.text_input("Enter your prompt") if prompt: response = query_engine.query(prompt) st.write(response.response)
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.ServiceContext.from_defaults", "llama_index.download_loader", "llama_index.llms.HuggingFaceLLM", "llama_index.prompts.prompts.SimpleInputPrompt", "llama_index.set_global_service_context" ]
[((495, 527), 'llama_index.download_loader', 'download_loader', (['"""PyMuPDFReader"""'], {}), "('PyMuPDFReader')\n", (510, 527), False, 'from llama_index import set_global_service_context, ServiceContext, VectorStoreIndex, download_loader\n'), ((2176, 2216), 'llama_index.prompts.prompts.SimpleInputPrompt', 'SimpleInputPrompt', (['"""{query_str} [/INST]"""'], {}), "('{query_str} [/INST]')\n", (2193, 2216), False, 'from llama_index.prompts.prompts import SimpleInputPrompt\n'), ((2224, 2394), 'llama_index.llms.HuggingFaceLLM', 'HuggingFaceLLM', ([], {'context_window': '(4096)', 'max_new_tokens': '(256)', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt', 'model': 'model', 'tokenizer': 'tokenizer'}), '(context_window=4096, max_new_tokens=256, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, model=model,\n tokenizer=tokenizer)\n', (2238, 2394), False, 'from llama_index.llms import HuggingFaceLLM\n'), ((2432, 2518), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size': '(1024)', 'llm': 'llm', 'embed_model': 'embedding_llm'}), '(chunk_size=1024, llm=llm, embed_model=\n embedding_llm)\n', (2460, 2518), False, 'from llama_index import set_global_service_context, ServiceContext, VectorStoreIndex, download_loader\n'), ((2520, 2563), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (2546, 2563), False, 'from llama_index import set_global_service_context, ServiceContext, VectorStoreIndex, download_loader\n'), ((2616, 2658), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2647, 2658), False, 'from llama_index import set_global_service_context, ServiceContext, VectorStoreIndex, download_loader\n'), ((2699, 2721), 'streamlit.title', 'st.title', (['"""Llama2 RAG"""'], {}), "('Llama2 RAG')\n", (2707, 2721), True, 'import streamlit as st\n'), ((2732, 2766), 'streamlit.text_input', 'st.text_input', (['"""Enter your prompt"""'], {}), "('Enter your prompt')\n", (2745, 2766), True, 'import streamlit as st\n'), ((1330, 1424), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name'], {'cache_dir': '"""./model/"""', 'use_auth_token': 'auth_token'}), "(model_name, cache_dir='./model/',\n use_auth_token=auth_token)\n", (1359, 1424), False, 'from transformers import AutoTokenizer, AutoModelForCausalLM\n'), ((1447, 1594), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['model_name'], {'cache_dir': '"""./model/"""', 'use_auth_token': 'auth_token', 'torch_dtype': 'torch.float16', 'load_in_8bit': '(True)'}), "(model_name, cache_dir='./model/',\n use_auth_token=auth_token, torch_dtype=torch.float16, load_in_8bit=True)\n", (1483, 1594), False, 'from transformers import AutoTokenizer, AutoModelForCausalLM\n'), ((1863, 1888), 'glob.glob', 'glob', (["(directory + '*.pdf')"], {}), "(directory + '*.pdf')\n", (1867, 1888), False, 'from glob import glob\n'), ((2824, 2851), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (2832, 2851), True, 'import streamlit as st\n'), ((1687, 1739), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""all-MiniLM-L6-v2"""'}), "(model_name='all-MiniLM-L6-v2')\n", (1708, 1739), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n')]
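The Streamlit app above rebuilds the vector index on every rerun; below is a sketch, under the assumption that caching is desired, of reusing the @st.cache_resource pattern already applied to the model so the index is built once per session.

# Optional caching sketch, mirroring the existing @st.cache_resource usage above.
@st.cache_resource
def build_index(directory: str):
    docs = load_documents(directory)
    return VectorStoreIndex.from_documents(docs)

index = build_index("./documents/")
query_engine = index.as_query_engine()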
from langchain.agents import load_tools, Tool, tool from langchain.agents import initialize_agent from langchain.llms import OpenAI, OpenAIChat from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import Qdrant, Chroma, Pinecone, FAISS from langchain.text_splitter import CharacterTextSplitter from langchain.document_loaders import TextLoader from langchain.chains import RetrievalQA from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, Document from llama_index.readers.qdrant import QdrantReader from llama_index.optimization.optimizer import SentenceEmbeddingOptimizer from gptcache import cache from gptcache.adapter import openai import pinecone, warnings, yaml, os warnings.filterwarnings("ignore") class KapwingVectorStore: def __init__(self, chunk_size=10000, model_name="gpt-4", temperature=0, filepath='data/query_instructions.txt'): cache.init() cache.set_openai_key() with open("config/config.yaml", 'r') as stream: config = yaml.safe_load(stream) os.environ["OPENAI_API_KEY"] = config['OPENAI_API_KEY'] self.qdrant_host = config['QDRANT_HOST'] self.qdrant_api_key = config['QDRANT_API_KEY'] self.pcone_api_key = config['PINECONE_API_KEY'] self.pcone_env = config['PINECONE_HOST'] self.text_splitter = CharacterTextSplitter(separator="\n\n\n", chunk_size=chunk_size, chunk_overlap=0) self.loader = TextLoader(filepath) self.docs = self.text_splitter.split_documents(self.loader.load()) self.embeddings = OpenAIEmbeddings() self.llm_ = OpenAIChat(model_name=model_name, temperature=temperature) with open("config/prompts.yaml", 'r') as stream: prompts = yaml.safe_load(stream) self.prefix = prompts['vectorstore_prefix'] self.suffix = prompts['vectorstore_suffix'] self.simple_scripts_prompt = prompts['simple_scripts_prompt'] self.mask_prompt = prompts['mask_prompt'] self.fast_func_prompt = prompts['fast_func_prompt'] def get_qdrant(self): self.qdrant_tool_db = Qdrant.from_documents(self.docs, self.embeddings, host=self.qdrant_host, prefer_grpc=True, api_key=self.qdrant_api_key).as_retriever() self.qdrant_tool_vec = RetrievalQA.from_llm(llm=self.llm_, retriever=self.qdrant_tool_db) return self.qdrant_tool_db, self.qdrant_tool_vec def get_faiss(self): self.faiss_tool_db = FAISS.from_documents(self.docs, self.embeddings).as_retriever() self.faiss_tool_vec = RetrievalQA.from_llm(llm=self.llm_, retriever=self.faiss_tool_db) return self.faiss_tool_db, self.faiss_tool_vec def get_chroma(self): self.chroma_tool_db = Chroma.from_documents(self.docs, self.embeddings, collection_name="tools").as_retriever() self.chroma_tool_vec = RetrievalQA.from_llm(llm=self.llm_, retriever=self.chroma_tool_db) return self.chroma_tool_db, self.chroma_tool_vec def get_pcone(self): pinecone.init(api_key=self.pcone_api_key, environment=self.pcone_env) self.pcone_tool_db = Pinecone.from_documents(self.docs, self.embeddings, index_name="tool-db").as_retriever() self.pcone_tool_vec = RetrievalQA.from_llm(llm=self.llm_, retriever=self.pcone_tool_db) return self.pcone_tool_db, self.pcone_tool_vec def set_gpt_index(self): self.gpt_docs = [Document(doc.page_content) for doc in self.docs] self.tool_index = GPTSimpleVectorIndex.from_documents(self.gpt_docs) def gpt_index_query(self, query): res = self.tool_index.query(self.prefix.format(query=query) + self.mask_prompt + self.suffix, similarity_top_k=3 # optimizer=SentenceEmbeddingOptimizer(percentile_cutoff=0.3) ) return res def gpt_index_funcs(self, query): res = self.tool_index.query(self.fast_func_prompt.format(query=query), similarity_top_k=3 
# optimizer=SentenceEmbeddingOptimizer(percentile_cutoff=0.3) ) return res def gpt_index_scripts_query(self, query): res = self.tool_index.query(self.simple_scripts_prompt.format(query=query) + self.mask_prompt, # similarity_top_k=3, # optimizer=SentenceEmbeddingOptimizer(percentile_cutoff=0.3) ) return res def qdrant_query(self, query): res = self.qdrant_tool_vec.run(self.prefix.format(query=query) + self.suffix) return res def pcone_query(self, query): res = self.pcone_tool_vec.run(self.prefix.format(query=query) + self.suffix) return res def faiss_query(self, query): res = self.faiss_tool_vec.run(self.prefix.format(query=query) + self.suffix) return res def faiss_scripts_query(self, query): res = self.faiss_tool_vec.run(self.simple_scripts_prompt.format(query=query) + self.mask_prompt) return res def main(): query = input("QUERY: ") vec = KapwingVectorStore() vec.get_faiss() res = vec.faiss_query(query) print(res) if __name__ == "__main__": main()
[ "llama_index.GPTSimpleVectorIndex.from_documents", "llama_index.Document" ]
[((721, 754), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (744, 754), False, 'import pinecone, warnings, yaml, os\n'), ((978, 990), 'gptcache.cache.init', 'cache.init', ([], {}), '()\n', (988, 990), False, 'from gptcache import cache\n'), ((999, 1021), 'gptcache.cache.set_openai_key', 'cache.set_openai_key', ([], {}), '()\n', (1019, 1021), False, 'from gptcache import cache\n'), ((1435, 1520), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n\n\n"""', 'chunk_size': 'chunk_size', 'chunk_overlap': '(0)'}), "(separator='\\n\\n\\n', chunk_size=chunk_size,\n chunk_overlap=0)\n", (1456, 1520), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1539, 1559), 'langchain.document_loaders.TextLoader', 'TextLoader', (['filepath'], {}), '(filepath)\n', (1549, 1559), False, 'from langchain.document_loaders import TextLoader\n'), ((1662, 1680), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1678, 1680), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1701, 1759), 'langchain.llms.OpenAIChat', 'OpenAIChat', ([], {'model_name': 'model_name', 'temperature': 'temperature'}), '(model_name=model_name, temperature=temperature)\n', (1711, 1759), False, 'from langchain.llms import OpenAI, OpenAIChat\n'), ((2379, 2445), 'langchain.chains.RetrievalQA.from_llm', 'RetrievalQA.from_llm', ([], {'llm': 'self.llm_', 'retriever': 'self.qdrant_tool_db'}), '(llm=self.llm_, retriever=self.qdrant_tool_db)\n', (2399, 2445), False, 'from langchain.chains import RetrievalQA\n'), ((2652, 2717), 'langchain.chains.RetrievalQA.from_llm', 'RetrievalQA.from_llm', ([], {'llm': 'self.llm_', 'retriever': 'self.faiss_tool_db'}), '(llm=self.llm_, retriever=self.faiss_tool_db)\n', (2672, 2717), False, 'from langchain.chains import RetrievalQA\n'), ((2951, 3017), 'langchain.chains.RetrievalQA.from_llm', 'RetrievalQA.from_llm', ([], {'llm': 'self.llm_', 'retriever': 'self.chroma_tool_db'}), '(llm=self.llm_, retriever=self.chroma_tool_db)\n', (2971, 3017), False, 'from langchain.chains import RetrievalQA\n'), ((3109, 3178), 'pinecone.init', 'pinecone.init', ([], {'api_key': 'self.pcone_api_key', 'environment': 'self.pcone_env'}), '(api_key=self.pcone_api_key, environment=self.pcone_env)\n', (3122, 3178), False, 'import pinecone, warnings, yaml, os\n'), ((3327, 3392), 'langchain.chains.RetrievalQA.from_llm', 'RetrievalQA.from_llm', ([], {'llm': 'self.llm_', 'retriever': 'self.pcone_tool_db'}), '(llm=self.llm_, retriever=self.pcone_tool_db)\n', (3347, 3392), False, 'from langchain.chains import RetrievalQA\n'), ((3578, 3628), 'llama_index.GPTSimpleVectorIndex.from_documents', 'GPTSimpleVectorIndex.from_documents', (['self.gpt_docs'], {}), '(self.gpt_docs)\n', (3613, 3628), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, Document\n'), ((1108, 1130), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (1122, 1130), False, 'import pinecone, warnings, yaml, os\n'), ((1848, 1870), 'yaml.safe_load', 'yaml.safe_load', (['stream'], {}), '(stream)\n', (1862, 1870), False, 'import pinecone, warnings, yaml, os\n'), ((3503, 3529), 'llama_index.Document', 'Document', (['doc.page_content'], {}), '(doc.page_content)\n', (3511, 3529), False, 'from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, Document\n'), ((2213, 2336), 'langchain.vectorstores.Qdrant.from_documents', 'Qdrant.from_documents', (['self.docs', 
'self.embeddings'], {'host': 'self.qdrant_host', 'prefer_grpc': '(True)', 'api_key': 'self.qdrant_api_key'}), '(self.docs, self.embeddings, host=self.qdrant_host,\n prefer_grpc=True, api_key=self.qdrant_api_key)\n', (2234, 2336), False, 'from langchain.vectorstores import Qdrant, Chroma, Pinecone, FAISS\n'), ((2558, 2606), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['self.docs', 'self.embeddings'], {}), '(self.docs, self.embeddings)\n', (2578, 2606), False, 'from langchain.vectorstores import Qdrant, Chroma, Pinecone, FAISS\n'), ((2830, 2904), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['self.docs', 'self.embeddings'], {'collection_name': '"""tools"""'}), "(self.docs, self.embeddings, collection_name='tools')\n", (2851, 2904), False, 'from langchain.vectorstores import Qdrant, Chroma, Pinecone, FAISS\n'), ((3208, 3281), 'langchain.vectorstores.Pinecone.from_documents', 'Pinecone.from_documents', (['self.docs', 'self.embeddings'], {'index_name': '"""tool-db"""'}), "(self.docs, self.embeddings, index_name='tool-db')\n", (3231, 3281), False, 'from langchain.vectorstores import Qdrant, Chroma, Pinecone, FAISS\n')]
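A hedged sketch of exercising the GPT-Index path of KapwingVectorStore above; it assumes the config/config.yaml, config/prompts.yaml, and data/query_instructions.txt files expected by the constructor are present on disk.

# Assumes the YAML config and prompt files read by __init__ exist.
vec = KapwingVectorStore(model_name="gpt-4", temperature=0)
vec.set_gpt_index()
print(vec.gpt_index_query("trim the first five seconds of the clip"))
print(vec.gpt_index_funcs("add captions to the video"))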
import os, config, openai
from llama_index import StorageContext, load_index_from_storage

openai.api_key = config.OPENAI_API_KEY
os.environ['OPENAI_API_KEY'] = config.OPENAI_API_KEY

# new version of llama index uses StorageContext instead of load_from_disk
# index = GPTSimpleVectorIndex.load_from_disk('index_news.json')
storage_context = StorageContext.from_defaults(persist_dir="./storage")
index = load_index_from_storage(storage_context)

# new version of llama index uses query_engine.query()
query_engine = index.as_query_engine()
response = query_engine.query("What are some near-term risks to Nvidia?")
print(response)
[ "llama_index.StorageContext.from_defaults", "llama_index.load_index_from_storage" ]
[((342, 395), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (370, 395), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((404, 444), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (427, 444), False, 'from llama_index import StorageContext, load_index_from_storage\n')]
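The script above assumes an index was already persisted to ./storage; a minimal companion sketch of how that directory could have been produced with the same-era llama_index API (the data/ folder is a placeholder):

from llama_index import SimpleDirectoryReader, VectorStoreIndex

documents = SimpleDirectoryReader("data").load_data()  # placeholder source folder
index = VectorStoreIndex.from_documents(documents)
index.storage_context.persist(persist_dir="./storage")  # creates the directory loaded above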
import os
from dotenv import load_dotenv, find_dotenv
import numpy as np
import nest_asyncio

nest_asyncio.apply()


def get_openai_api_key():
    _ = load_dotenv(find_dotenv())
    return os.getenv("OPENAI_API_KEY")


from trulens_eval import (
    Feedback,
    TruLlama,
    OpenAI
)
from trulens_eval.feedback import Groundedness


def get_prebuilt_trulens_recorder(query_engine, app_id):
    openai = OpenAI()

    qa_relevance = (
        Feedback(openai.relevance_with_cot_reasons, name="Answer Relevance")
        .on_input_output()
    )

    qs_relevance = (
        Feedback(openai.relevance_with_cot_reasons, name = "Context Relevance")
        .on_input()
        .on(TruLlama.select_source_nodes().node.text)
        .aggregate(np.mean)
    )

    # grounded = Groundedness(groundedness_provider=openai, summarize_provider=openai)
    grounded = Groundedness(groundedness_provider=openai)

    groundedness = (
        Feedback(grounded.groundedness_measure_with_cot_reasons, name="Groundedness")
        .on(TruLlama.select_source_nodes().node.text)
        .on_output()
        .aggregate(grounded.grounded_statements_aggregator)
    )

    feedbacks = [qa_relevance, qs_relevance, groundedness]
    tru_recorder = TruLlama(
        query_engine,
        app_id=app_id,
        feedbacks=feedbacks
    )
    return tru_recorder


from llama_index import ServiceContext, VectorStoreIndex, StorageContext
from llama_index.node_parser import SentenceWindowNodeParser
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor
from llama_index.indices.postprocessor import SentenceTransformerRerank
from llama_index import load_index_from_storage
import os


def build_sentence_window_index(
    documents,
    llm,
    embed_model="local:BAAI/bge-small-en-v1.5",
    sentence_window_size=3,
    save_dir="sentence_index",
):
    # create the sentence window node parser w/ default settings
    node_parser = SentenceWindowNodeParser.from_defaults(
        window_size=sentence_window_size,
        window_metadata_key="window",
        original_text_metadata_key="original_text",
    )
    sentence_context = ServiceContext.from_defaults(
        llm=llm,
        embed_model=embed_model,
        node_parser=node_parser,
    )
    if not os.path.exists(save_dir):
        sentence_index = VectorStoreIndex.from_documents(
            documents, service_context=sentence_context
        )
        sentence_index.storage_context.persist(persist_dir=save_dir)
    else:
        sentence_index = load_index_from_storage(
            StorageContext.from_defaults(persist_dir=save_dir),
            service_context=sentence_context,
        )

    return sentence_index


def get_sentence_window_query_engine(
    sentence_index,
    similarity_top_k=6,
    rerank_top_n=2,
):
    # define postprocessors
    postproc = MetadataReplacementPostProcessor(target_metadata_key="window")
    rerank = SentenceTransformerRerank(
        top_n=rerank_top_n, model="BAAI/bge-reranker-base"
    )

    sentence_window_engine = sentence_index.as_query_engine(
        similarity_top_k=similarity_top_k, node_postprocessors=[postproc, rerank]
    )
    return sentence_window_engine


from llama_index.node_parser import HierarchicalNodeParser
from llama_index.node_parser import get_leaf_nodes
from llama_index import StorageContext
from llama_index.retrievers import AutoMergingRetriever
from llama_index.indices.postprocessor import SentenceTransformerRerank
from llama_index.query_engine import RetrieverQueryEngine


def build_automerging_index(
    documents,
    llm,
    embed_model="local:BAAI/bge-small-en-v1.5",
    save_dir="merging_index",
    chunk_sizes=None,
):
    chunk_sizes = chunk_sizes or [2048, 512, 128]
    node_parser = HierarchicalNodeParser.from_defaults(chunk_sizes=chunk_sizes)
    nodes = node_parser.get_nodes_from_documents(documents)
    leaf_nodes = get_leaf_nodes(nodes)
    merging_context = ServiceContext.from_defaults(
        llm=llm,
        embed_model=embed_model,
    )
    storage_context = StorageContext.from_defaults()
    storage_context.docstore.add_documents(nodes)

    if not os.path.exists(save_dir):
        automerging_index = VectorStoreIndex(
            leaf_nodes, storage_context=storage_context, service_context=merging_context
        )
        automerging_index.storage_context.persist(persist_dir=save_dir)
    else:
        automerging_index = load_index_from_storage(
            StorageContext.from_defaults(persist_dir=save_dir),
            service_context=merging_context,
        )
    return automerging_index


def get_automerging_query_engine(
    automerging_index,
    similarity_top_k=12,
    rerank_top_n=6,
):
    base_retriever = automerging_index.as_retriever(similarity_top_k=similarity_top_k)
    retriever = AutoMergingRetriever(
        base_retriever, automerging_index.storage_context, verbose=True
    )
    rerank = SentenceTransformerRerank(
        top_n=rerank_top_n, model="BAAI/bge-reranker-base"
    )
    auto_merging_engine = RetrieverQueryEngine.from_args(
        retriever, node_postprocessors=[rerank]
    )
    return auto_merging_engine
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.retrievers.AutoMergingRetriever", "llama_index.node_parser.get_leaf_nodes", "llama_index.ServiceContext.from_defaults", "llama_index.StorageContext.from_defaults", "llama_index.node_parser.SentenceWindowNodeParser.from_defaults", "llama_index.VectorStoreIndex", "llama_index.node_parser.HierarchicalNodeParser.from_defaults", "llama_index.query_engine.RetrieverQueryEngine.from_args", "llama_index.indices.postprocessor.MetadataReplacementPostProcessor", "llama_index.indices.postprocessor.SentenceTransformerRerank" ]
[((96, 116), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (114, 116), False, 'import nest_asyncio\n'), ((192, 219), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (201, 219), False, 'import os\n'), ((408, 416), 'trulens_eval.OpenAI', 'OpenAI', ([], {}), '()\n', (414, 416), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((862, 904), 'trulens_eval.feedback.Groundedness', 'Groundedness', ([], {'groundedness_provider': 'openai'}), '(groundedness_provider=openai)\n', (874, 904), False, 'from trulens_eval.feedback import Groundedness\n'), ((1245, 1303), 'trulens_eval.TruLlama', 'TruLlama', (['query_engine'], {'app_id': 'app_id', 'feedbacks': 'feedbacks'}), '(query_engine, app_id=app_id, feedbacks=feedbacks)\n', (1253, 1303), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((1954, 2104), 'llama_index.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': 'sentence_window_size', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""'}), "(window_size=sentence_window_size,\n window_metadata_key='window', original_text_metadata_key='original_text')\n", (1992, 2104), False, 'from llama_index.node_parser import SentenceWindowNodeParser\n'), ((2155, 2247), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'node_parser': 'node_parser'}), '(llm=llm, embed_model=embed_model, node_parser=\n node_parser)\n', (2183, 2247), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext\n'), ((2861, 2923), 'llama_index.indices.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (2893, 2923), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor\n'), ((2937, 3014), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': 'rerank_top_n', 'model': '"""BAAI/bge-reranker-base"""'}), "(top_n=rerank_top_n, model='BAAI/bge-reranker-base')\n", (2962, 3014), False, 'from llama_index.indices.postprocessor import SentenceTransformerRerank\n'), ((3777, 3838), 'llama_index.node_parser.HierarchicalNodeParser.from_defaults', 'HierarchicalNodeParser.from_defaults', ([], {'chunk_sizes': 'chunk_sizes'}), '(chunk_sizes=chunk_sizes)\n', (3813, 3838), False, 'from llama_index.node_parser import HierarchicalNodeParser\n'), ((3916, 3937), 'llama_index.node_parser.get_leaf_nodes', 'get_leaf_nodes', (['nodes'], {}), '(nodes)\n', (3930, 3937), False, 'from llama_index.node_parser import get_leaf_nodes\n'), ((3960, 4022), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (3988, 4022), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext\n'), ((4068, 4098), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (4096, 4098), False, 'from llama_index import StorageContext\n'), ((4825, 4914), 'llama_index.retrievers.AutoMergingRetriever', 'AutoMergingRetriever', (['base_retriever', 'automerging_index.storage_context'], {'verbose': '(True)'}), '(base_retriever, automerging_index.storage_context,\n verbose=True)\n', (4845, 4914), False, 'from llama_index.retrievers import 
AutoMergingRetriever\n'), ((4938, 5015), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': 'rerank_top_n', 'model': '"""BAAI/bge-reranker-base"""'}), "(top_n=rerank_top_n, model='BAAI/bge-reranker-base')\n", (4963, 5015), False, 'from llama_index.indices.postprocessor import SentenceTransformerRerank\n'), ((5056, 5127), 'llama_index.query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', (['retriever'], {'node_postprocessors': '[rerank]'}), '(retriever, node_postprocessors=[rerank])\n', (5086, 5127), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((165, 178), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (176, 178), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((2285, 2309), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (2299, 2309), False, 'import os\n'), ((2336, 2412), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'sentence_context'}), '(documents, service_context=sentence_context)\n', (2367, 2412), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext\n'), ((4161, 4185), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (4175, 4185), False, 'import os\n'), ((4215, 4313), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['leaf_nodes'], {'storage_context': 'storage_context', 'service_context': 'merging_context'}), '(leaf_nodes, storage_context=storage_context,\n service_context=merging_context)\n', (4231, 4313), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext\n'), ((447, 515), 'trulens_eval.Feedback', 'Feedback', (['openai.relevance_with_cot_reasons'], {'name': '"""Answer Relevance"""'}), "(openai.relevance_with_cot_reasons, name='Answer Relevance')\n", (455, 515), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((2576, 2626), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'save_dir'}), '(persist_dir=save_dir)\n', (2604, 2626), False, 'from llama_index import StorageContext\n'), ((4479, 4529), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'save_dir'}), '(persist_dir=save_dir)\n', (4507, 4529), False, 'from llama_index import StorageContext\n'), ((683, 713), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (711, 713), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((579, 648), 'trulens_eval.Feedback', 'Feedback', (['openai.relevance_with_cot_reasons'], {'name': '"""Context Relevance"""'}), "(openai.relevance_with_cot_reasons, name='Context Relevance')\n", (587, 648), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((935, 1012), 'trulens_eval.Feedback', 'Feedback', (['grounded.groundedness_measure_with_cot_reasons'], {'name': '"""Groundedness"""'}), "(grounded.groundedness_measure_with_cot_reasons, name='Groundedness')\n", (943, 1012), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((1029, 1059), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (1057, 1059), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n')]
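A hypothetical end-to-end use of the helpers defined above, showing how an index builder, its query-engine factory and the TruLens recorder fit together; the document path, model and question are placeholders:

from llama_index import SimpleDirectoryReader
from llama_index.llms import OpenAI as OpenAILLM  # llama_index LLM, distinct from trulens_eval.OpenAI

documents = SimpleDirectoryReader(input_files=["./example.pdf"]).load_data()  # placeholder file
llm = OpenAILLM(model="gpt-3.5-turbo", temperature=0.1)

sentence_index = build_sentence_window_index(documents, llm, save_dir="sentence_index")
engine = get_sentence_window_query_engine(sentence_index)

recorder = get_prebuilt_trulens_recorder(engine, app_id="sentence-window-rag")
with recorder as recording:  # log the call plus its feedback scores
    print(engine.query("What is this document about?"))  # placeholder question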
import streamlit as st
from llama_index import VectorStoreIndex, ServiceContext
from llama_index.llms import Perplexity
from llama_index import SimpleDirectoryReader


@st.cache_resource(show_spinner=True)
def load_data():
    with st.spinner(text="Die LSB-Informationen werden indiziert. Das dauert nur ein paar Augenblicke."):
        reader = SimpleDirectoryReader(input_dir="chatbot/data", recursive=True)
        docs = reader.load_data()
        service_context = ServiceContext.from_defaults(llm=llm)
        index = VectorStoreIndex.from_documents(docs, service_context=service_context)
        return index


pplx_api_key = st.secrets.pplx_key

llm = Perplexity(
    api_key=pplx_api_key,
    model="pplx-70b-chat",
    temperature=0.4,
    system_prompt="Du bist ein Experte für die Leipziger Städtischen Bibliotheken. Du hilfst Nutzerinnen und Nutzern dabei, die Bibliothek zu benutzen. Du beantwortest Fragen zum Ausleihbetrieb, zu den Standorten und den verfügbaren Services. Deine Antworten sollen auf Fakten basieren. Halluziniere keine Informationen über die Bibliotheken, die nicht auf Fakten basieren. Wenn Du eine Information über die Bibliotheken nicht hast, sage den Nutzenden, dass Du Ihnen nicht weiterhelfen kannst. Antworte auf Deutsch."
)

st.header("Der LSB-Service-Chat 💬 📚")

if "messages" not in st.session_state.keys():  # Initialize the chat message history
    st.session_state.messages = [
        {"role": "assistant", "content": "Was möchten Sie über die Leipziger Städtischen Bibliotheken wissen?"}
    ]

index = load_data()

chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True)

if prompt := st.chat_input("Ihre Frage"):  # Prompt for user input and save to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

for message in st.session_state.messages:  # Display the prior chat messages
    with st.chat_message(message["role"]):
        st.write(message["content"])

# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        with st.spinner("Ich denke nach ..."):
            response = chat_engine.chat(prompt)
            st.write(response.response)
            message = {"role": "assistant", "content": response.response}
            st.session_state.messages.append(message)  # Add response to message history
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.llms.Perplexity", "llama_index.ServiceContext.from_defaults", "llama_index.SimpleDirectoryReader" ]
[((168, 204), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(True)'}), '(show_spinner=True)\n', (185, 204), True, 'import streamlit as st\n'), ((657, 1257), 'llama_index.llms.Perplexity', 'Perplexity', ([], {'api_key': 'pplx_api_key', 'model': '"""pplx-70b-chat"""', 'temperature': '(0.4)', 'system_prompt': '"""Du bist ein Experte für die Leipziger Städtischen Bibliotheken. Du hilfst Nutzerinnen und Nutzern dabei, die Bibliothek zu benutzen. Du beantwortest Fragen zum Ausleihbetrieb, zu den Standorten und den verfügbaren Services. Deine Antworten sollen auf Fakten basieren. Halluziniere keine Informationen über die Bibliotheken, die nicht auf Fakten basieren. Wenn Du eine Information über die Bibliotheken nicht hast, sage den Nutzenden, dass Du Ihnen nicht weiterhelfen kannst. Antworte auf Deutsch."""'}), "(api_key=pplx_api_key, model='pplx-70b-chat', temperature=0.4,\n system_prompt=\n 'Du bist ein Experte für die Leipziger Städtischen Bibliotheken. Du hilfst Nutzerinnen und Nutzern dabei, die Bibliothek zu benutzen. Du beantwortest Fragen zum Ausleihbetrieb, zu den Standorten und den verfügbaren Services. Deine Antworten sollen auf Fakten basieren. Halluziniere keine Informationen über die Bibliotheken, die nicht auf Fakten basieren. Wenn Du eine Information über die Bibliotheken nicht hast, sage den Nutzenden, dass Du Ihnen nicht weiterhelfen kannst. Antworte auf Deutsch.'\n )\n", (667, 1257), False, 'from llama_index.llms import Perplexity\n'), ((1251, 1288), 'streamlit.header', 'st.header', (['"""Der LSB-Service-Chat 💬 📚"""'], {}), "('Der LSB-Service-Chat 💬 📚')\n", (1260, 1288), True, 'import streamlit as st\n'), ((1311, 1334), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (1332, 1334), True, 'import streamlit as st\n'), ((1643, 1670), 'streamlit.chat_input', 'st.chat_input', (['"""Ihre Frage"""'], {}), "('Ihre Frage')\n", (1656, 1670), True, 'import streamlit as st\n'), ((1725, 1794), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (1757, 1794), True, 'import streamlit as st\n'), ((231, 336), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""Die LSB-Informationen werden indiziert. Das dauert nur ein paar Augenblicke."""'}), "(text=\n 'Die LSB-Informationen werden indiziert. 
Das dauert nur ein paar Augenblicke.'\n )\n", (241, 336), True, 'import streamlit as st\n'), ((345, 408), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': '"""chatbot/data"""', 'recursive': '(True)'}), "(input_dir='chatbot/data', recursive=True)\n", (366, 408), False, 'from llama_index import SimpleDirectoryReader\n'), ((469, 506), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (497, 506), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((523, 593), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {'service_context': 'service_context'}), '(docs, service_context=service_context)\n', (554, 593), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((1881, 1913), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (1896, 1913), True, 'import streamlit as st\n'), ((1923, 1951), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (1931, 1951), True, 'import streamlit as st\n'), ((2084, 2112), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2099, 2112), True, 'import streamlit as st\n'), ((2127, 2159), 'streamlit.spinner', 'st.spinner', (['"""Ich denke nach ..."""'], {}), "('Ich denke nach ...')\n", (2137, 2159), True, 'import streamlit as st\n'), ((2221, 2248), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (2229, 2248), True, 'import streamlit as st\n'), ((2335, 2376), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (2367, 2376), True, 'import streamlit as st\n')]
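The Streamlit-independent core of the app above, reduced to a sketch so the condense_question chat-engine flow is visible on its own; the API key and question are placeholders:

from llama_index import ServiceContext, SimpleDirectoryReader, VectorStoreIndex
from llama_index.llms import Perplexity

llm = Perplexity(api_key="<pplx-key>", model="pplx-70b-chat", temperature=0.4)  # placeholder key
docs = SimpleDirectoryReader(input_dir="chatbot/data", recursive=True).load_data()
index = VectorStoreIndex.from_documents(docs, service_context=ServiceContext.from_defaults(llm=llm))

chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True)
print(chat_engine.chat("Wie funktioniert die Ausleihe?").response)  # placeholder question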
import logging
import sys

from dotenv import load_dotenv
from llama_index.core import VectorStoreIndex
from llama_index.readers.web import SimpleWebPageReader


def setup_logging():
    """
    Initialize logging configuration to output logs to stdout.
    """
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))


def load_environment_variables():
    """
    Load environment variables from the .env file.
    """
    load_dotenv()


def load_web_data(url):
    """
    Load data from a web page using SimpleWebPageReader.

    :param url: The URL of the web page to load.
    :return: A list of loaded documents.
    """
    return SimpleWebPageReader(html_to_text=True).load_data(urls=[url])


def create_vector_store_index(documents):
    """
    Create a VectorStoreIndex from the loaded documents.

    :param documents: The list of loaded documents.
    :return: The created VectorStoreIndex.
    """
    return VectorStoreIndex.from_documents(documents)


def query_index(index, query):
    """
    Query the VectorStoreIndex using the provided query.

    :param index: The VectorStoreIndex to query.
    :param query: The query string.
    :return: The response from the query engine.
    """
    query_engine = index.as_query_engine()
    return query_engine.query(query)


def main():
    """
    Main function to orchestrate the data loading, indexing, and querying process.
    """
    setup_logging()
    load_environment_variables()

    url = 'https://www.llamaindex.ai/blog/agentic-rag-with-llamaindex-2721b8a49ff6'
    documents = load_web_data(url)
    index = create_vector_store_index(documents)

    query = "Agentic RAG is an example of:"
    response = query_index(index, query)
    print(response)


if __name__ == "__main__":
    main()
[ "llama_index.readers.web.SimpleWebPageReader", "llama_index.core.VectorStoreIndex.from_documents" ]
[((264, 322), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (283, 322), False, 'import logging\n'), ((506, 519), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (517, 519), False, 'from dotenv import load_dotenv\n'), ((1002, 1044), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1033, 1044), False, 'from llama_index.core import VectorStoreIndex\n'), ((358, 398), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (379, 398), False, 'import logging\n'), ((327, 346), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (344, 346), False, 'import logging\n'), ((719, 757), 'llama_index.readers.web.SimpleWebPageReader', 'SimpleWebPageReader', ([], {'html_to_text': '(True)'}), '(html_to_text=True)\n', (738, 757), False, 'from llama_index.readers.web import SimpleWebPageReader\n')]
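An optional extension of the script above: persist the web index once and reload it on later runs instead of re-fetching the page. PERSIST_DIR is a placeholder path, and the sketch reuses the helper functions defined above:

import os
from llama_index.core import StorageContext, load_index_from_storage

PERSIST_DIR = "./web_index"  # placeholder location

def get_or_build_index(url):
    """Reuse a persisted index when present, otherwise build and persist one."""
    if os.path.exists(PERSIST_DIR):
        storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
        return load_index_from_storage(storage_context)
    index = create_vector_store_index(load_web_data(url))
    index.storage_context.persist(persist_dir=PERSIST_DIR)
    return index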
# %% [markdown]
# # Llama-Index Quickstart
#
# In this quickstart you will create a simple Llama Index App and learn how to log it and get feedback on an LLM response.
#
# For evaluation, we will leverage the "hallucination triad" of groundedness, context relevance and answer relevance.
#
# [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/truera/trulens/blob/main/trulens_eval/examples/quickstart/llama_index_quickstart.ipynb)

# %%
# %pip install -qU "trulens_eval>=0.19.2" "llama_index>0.9.17" "html2text>=2020.1.16" qdrant_client python-dotenv ipywidgets streamlit_jupyter "litellm>=1.15.1" google-cloud-aiplatform

import os

from trulens_eval import Feedback, TruLlama
from trulens_eval.feedback import Groundedness
from trulens_eval import LiteLLM
import numpy as np
from trulens_eval import Tru

from google.cloud import aiplatform

from llama_index.readers.web import SimpleWebPageReader
from llama_index import VectorStoreIndex, StorageContext, ServiceContext
from llama_index.embeddings import GeminiEmbedding
from llama_index.llms import Gemini
from llama_index.vector_stores import QdrantVectorStore
import qdrant_client
from llama_index import StorageContext

GOOGLE_API_KEY = os.environ["GEMINI_API_KEY"]

# This is used by the LiteLLM for Vertex AI models including Gemini.
# The LiteLLM wrapper for Gemini is used by the TruLens evaluation provider.
aiplatform.init(project="fovi-site", location="us-west1")

tru = Tru(database_redact_keys=True)

# ### Create Simple LLM Application
#
# This example uses LlamaIndex which internally uses an OpenAI LLM.

__documents = SimpleWebPageReader(html_to_text=True).load_data(
    ["http://paulgraham.com/worked.html"]
)

# from llama_index.vector_stores import ChromaVectorStore
# import chromadb

# # initialize client, setting path to save data
# db = chromadb.PersistentClient(path="./chroma_db")

# # create collection
# chroma_collection = db.get_or_create_collection("quickstart")

# # assign chroma as the vector_store to the context
# vector_store = ChromaVectorStore(chroma_collection=chroma_collection)

# Create a local Qdrant vector store
__client = qdrant_client.QdrantClient(path="qdrant_gemini_3")
__vector_store = QdrantVectorStore(client=__client, collection_name="collection")

# Using the embedding model to Gemini
__embed_model = GeminiEmbedding(
    model_name="models/embedding-001", api_key=GOOGLE_API_KEY
)
__service_context = ServiceContext.from_defaults(
    llm=Gemini(api_key=GOOGLE_API_KEY), embed_model=__embed_model
)
__storage_context = StorageContext.from_defaults(vector_store=__vector_store)

__index = VectorStoreIndex.from_documents(
    __documents,
    service_context=__service_context,
    storage_context=__storage_context,
    show_progress=True,
)


def load_llamaindex_app():
    return __index.as_query_engine()


query_engine = load_llamaindex_app()

# response = query_engine.query("What does the author say about their education?")
# print(response)

# response = query_engine.query("Where did the author go to school?")
# print(response)

# response = query_engine.query("Who was the author's Harvard PhD advisor?")
# print(response)

# response = query_engine.query("who was Tom Cheatham to the author?")
# print(response)

# response = query_engine.query("who is Tom? why is he in this story?")
# print(response)

# response = query_engine.query("what is this story about? what are the most important things the author want the reader to learn?")
# print(response)

# ## Initialize Feedback Function(s)

# import litellm
# litellm.set_verbose=True

# Initialize provider class
gemini_provider = LiteLLM(model_engine="gemini-pro")

grounded = Groundedness(groundedness_provider=gemini_provider)

# Define a groundedness feedback function
f_groundedness = (
    Feedback(grounded.groundedness_measure_with_cot_reasons)
    .on(TruLlama.select_source_nodes().node.text.collect())
    .on_output()
    .aggregate(grounded.grounded_statements_aggregator)
)

# Question/answer relevance between overall question and answer.
f_qa_relevance = Feedback(gemini_provider.relevance).on_input_output()

# Question/statement relevance between question and each context chunk.
f_qs_relevance = (
    Feedback(gemini_provider.qs_relevance)
    .on_input()
    .on(TruLlama.select_source_nodes().node.text)
    .aggregate(np.mean)
)

# ## Instrument app for logging with TruLens
tru_query_engine_recorder = TruLlama(
    query_engine,
    tru=tru,
    app_id="PaulGraham",
    initial_app_loader=load_llamaindex_app,
    feedbacks=[f_groundedness, f_qa_relevance, f_qs_relevance],
)

# # or as context manager
# with tru_query_engine_recorder as recording:
#     response = query_engine.query("Why did the author drop AI?")
#     print(response)

# ## Explore in a Dashboard
tru.run_dashboard()  # open a local streamlit app to explore
# tru.run_dashboard_in_jupyter()  # open a streamlit app in the notebook
# tru.stop_dashboard(force=True)  # stop if needed

# Alternatively, you can run `trulens-eval` from a command line in the same folder to start the dashboard.

# Note: Feedback functions evaluated in the deferred manner can be seen in the "Progress" page of the TruLens dashboard.

# ## Or view results directly in your notebook
# tru.get_records_and_feedback(app_ids=[])[0]  # pass an empty list of app_ids to get all

# def load_llamaindex_app():
#     # from llama_index import VectorStoreIndex
#     index = VectorStoreIndex.from_documents(documents)
#     query_engine = index.as_query_engine()
#     return query_engine

# app2 = load_llamaindex_app()

# # tru_app2 = tru.Llama(
# # Can't specify which Tru instance to use with tru.Llama.
# tru_app2 = TruLlama(
#     app2,
#     tru=tru,
#     app_id="llamaindex_appZZ",
#     initial_app_loader=load_llamaindex_app,
#     feedbacks=[f_groundedness, f_qa_relevance, f_qs_relevance]
# )

# tru.add_app(tru_app2)

# from trulens_eval.appui import AppUI

# aui = AppUI(
#     app=tru_app2,
#     app_selectors=[
#     ],
#     record_selectors=[
#         "app.retriever.retrieve[0].rets[:].score",
#         "app.retriever.retrieve[0].rets[:].node.text",
#     ]
# )

# aui.widget
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.llms.Gemini", "llama_index.StorageContext.from_defaults", "llama_index.vector_stores.QdrantVectorStore", "llama_index.readers.web.SimpleWebPageReader", "llama_index.embeddings.GeminiEmbedding" ]
[((1436, 1493), 'google.cloud.aiplatform.init', 'aiplatform.init', ([], {'project': '"""fovi-site"""', 'location': '"""us-west1"""'}), "(project='fovi-site', location='us-west1')\n", (1451, 1493), False, 'from google.cloud import aiplatform\n'), ((1501, 1531), 'trulens_eval.Tru', 'Tru', ([], {'database_redact_keys': '(True)'}), '(database_redact_keys=True)\n', (1504, 1531), False, 'from trulens_eval import Tru\n'), ((2190, 2240), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'path': '"""qdrant_gemini_3"""'}), "(path='qdrant_gemini_3')\n", (2216, 2240), False, 'import qdrant_client\n'), ((2259, 2323), 'llama_index.vector_stores.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': '__client', 'collection_name': '"""collection"""'}), "(client=__client, collection_name='collection')\n", (2276, 2323), False, 'from llama_index.vector_stores import QdrantVectorStore\n'), ((2379, 2453), 'llama_index.embeddings.GeminiEmbedding', 'GeminiEmbedding', ([], {'model_name': '"""models/embedding-001"""', 'api_key': 'GOOGLE_API_KEY'}), "(model_name='models/embedding-001', api_key=GOOGLE_API_KEY)\n", (2394, 2453), False, 'from llama_index.embeddings import GeminiEmbedding\n'), ((2598, 2655), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': '__vector_store'}), '(vector_store=__vector_store)\n', (2626, 2655), False, 'from llama_index import StorageContext\n'), ((2667, 2806), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['__documents'], {'service_context': '__service_context', 'storage_context': '__storage_context', 'show_progress': '(True)'}), '(__documents, service_context=\n __service_context, storage_context=__storage_context, show_progress=True)\n', (2698, 2806), False, 'from llama_index import VectorStoreIndex, StorageContext, ServiceContext\n'), ((3677, 3711), 'trulens_eval.LiteLLM', 'LiteLLM', ([], {'model_engine': '"""gemini-pro"""'}), "(model_engine='gemini-pro')\n", (3684, 3711), False, 'from trulens_eval import LiteLLM\n'), ((3724, 3775), 'trulens_eval.feedback.Groundedness', 'Groundedness', ([], {'groundedness_provider': 'gemini_provider'}), '(groundedness_provider=gemini_provider)\n', (3736, 3775), False, 'from trulens_eval.feedback import Groundedness\n'), ((4473, 4634), 'trulens_eval.TruLlama', 'TruLlama', (['query_engine'], {'tru': 'tru', 'app_id': '"""PaulGraham"""', 'initial_app_loader': 'load_llamaindex_app', 'feedbacks': '[f_groundedness, f_qa_relevance, f_qs_relevance]'}), "(query_engine, tru=tru, app_id='PaulGraham', initial_app_loader=\n load_llamaindex_app, feedbacks=[f_groundedness, f_qa_relevance,\n f_qs_relevance])\n", (4481, 4634), False, 'from trulens_eval import Feedback, TruLlama\n'), ((1654, 1692), 'llama_index.readers.web.SimpleWebPageReader', 'SimpleWebPageReader', ([], {'html_to_text': '(True)'}), '(html_to_text=True)\n', (1673, 1692), False, 'from llama_index.readers.web import SimpleWebPageReader\n'), ((2518, 2548), 'llama_index.llms.Gemini', 'Gemini', ([], {'api_key': 'GOOGLE_API_KEY'}), '(api_key=GOOGLE_API_KEY)\n', (2524, 2548), False, 'from llama_index.llms import Gemini\n'), ((4117, 4152), 'trulens_eval.Feedback', 'Feedback', (['gemini_provider.relevance'], {}), '(gemini_provider.relevance)\n', (4125, 4152), False, 'from trulens_eval import Feedback, TruLlama\n'), ((4330, 4360), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (4358, 4360), False, 'from trulens_eval import Feedback, TruLlama\n'), ((3842, 3898), 
'trulens_eval.Feedback', 'Feedback', (['grounded.groundedness_measure_with_cot_reasons'], {}), '(grounded.groundedness_measure_with_cot_reasons)\n', (3850, 3898), False, 'from trulens_eval import Feedback, TruLlama\n'), ((4267, 4305), 'trulens_eval.Feedback', 'Feedback', (['gemini_provider.qs_relevance'], {}), '(gemini_provider.qs_relevance)\n', (4275, 4305), False, 'from trulens_eval import Feedback, TruLlama\n'), ((3907, 3937), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (3935, 3937), False, 'from trulens_eval import Feedback, TruLlama\n')]
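A short sketch of reading the logged results back out of TruLens once the recorder above has captured a query, following the get_records_and_feedback call hinted at in the notebook's trailing comments; the question is a placeholder:

with tru_query_engine_recorder as recording:  # capture one instrumented call
    query_engine.query("What did the author work on before college?")

records_df, feedback_columns = tru.get_records_and_feedback(app_ids=["PaulGraham"])
print(feedback_columns)   # names of the three feedback functions
print(records_df.head())  # one row per recorded query with its feedback scores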
import os import shutil import tarfile import tempfile import time from pathlib import Path import arxiv import openai import pandas as pd import pdfplumber import streamlit as st from llama_index import (KeywordTableIndex, KnowledgeGraphIndex, ServiceContext, SimpleDirectoryReader, SummaryIndex, TreeIndex, VectorStoreIndex, download_loader, set_global_service_context) from llama_index.llms import OpenAI, Xinference from llama_index.schema import Document from PIL import Image from st_files_connection import FilesConnection from xinference.client import RESTfulClient from pdfextract.export_annotation import export_annotation from pdfextract.pdf_extract import pdf_extract from texannotate.annotate_file import annotate_file from texannotate.color_annotation import ColorAnnotation from texcompile.client import compile_pdf_return_bytes from utils.utils import (find_latex_file, postprocess_latex, preprocess_latex, tup2str) st.set_page_config(page_title='Chat with arXiv paper without PDF noise, powered by LaTeX Rainbow.', layout="wide") texcompile_host = st.secrets.texcompile_host texcompile_port = st.secrets.texcompile_port def main(): """ The main function for the Streamlit app. :return: None. """ st.title("Chat with arXiv paper, without PDF noise") st.sidebar.markdown('# Github link: [LaTeX Rainbow](https://github.com/InsightsNet/texannotate)') st.sidebar.markdown("""<small>It's always good practice to verify that a website is safe before giving it your API key. This site is open source, so you can check the code yourself, or run the streamlit app locally.</small>""", unsafe_allow_html=True) col1, col2 = st.columns([1, 0.8], gap='medium') with col2: with st.form("my_form"): api_key = st.text_input("Enter OpenAI API key here.", type='password') arxiv_id = st.text_input("Please enter a arXiv paper id:", value='1601.00978') submitted = st.form_submit_button("Submit and process arXiv paper (click once and wait)") if submitted: process_submit_button(col1, col2, arxiv_id, api_key) index = load_data() st.session_state["index"] = index if 'index' in st.session_state: if "imgs" in st.session_state.keys(): with col1.container(): for img in st.session_state["imgs"]: st.image(img) chat_engine = st.session_state["index"].as_chat_engine(chat_mode="condense_question", verbose=True) if "messages" not in st.session_state.keys(): # Initialize the chat message history st.session_state.messages = [ {"role": "assistant", "content": "Ask me a question about the paper!"} ] if prompt := st.chat_input("Your question"): # Prompt for user input and save to chat history st.session_state.messages.append({"role": "user", "content": prompt}) for message in st.session_state.messages: # Display the prior chat messages with st.chat_message(message["role"]): st.write(message["content"]) # If last message is not from assistant, generate a new response if st.session_state.messages[-1]["role"] != "assistant": with st.chat_message("assistant"): with st.spinner("Thinking..."): response = chat_engine.chat(prompt) st.write(response.response) message = {"role": "assistant", "content": response.response} st.session_state.messages.append(message) # Add response to message history def process_submit_button(col1, col2, arxiv_id, api_key): with col2: with st.spinner("Downloading LaTeX code..."): filename = validate_input(arxiv_id) if not filename: st.error("id not found on arXiv, or the paper doesn't contain LaTeX code.") return with st.spinner("Annotating LaTeX code... 
please wait..."): df_toc, df_data = extract_file(filename, col1) df_data.to_csv('data.csv', sep='\t') with st.spinner("Loading llm..."): if api_key == '': st.error('Please set your OpenAI key.') if api_key == 'local': set_local_llm() else: openai.api_key = api_key set_openai_llm() st.info("Now you get a cleaned PDF. Only colored part are penned by paper author. Extracted text are resorted by the reading order.", icon="📃") @st.cache_resource(show_spinner=True) def load_data(): df_data = pd.read_csv('data.csv', sep='\t') text = '' section_id = 0 df_data.index.name='myindex' for i, row in df_data[df_data['reading_order']!=-1].sort_values(by=['reading_order', 'myindex']).iterrows(): if row['section_id'] > section_id: text += '\n' section_id = row['section_id'] if row['label'] != 'Figure': text += row['token'] + ' ' sections = text.split('\n') docs = [Document(text=section) for section in sections] with st.spinner(text="Loading and indexing the paper - hang tight! This should take 1-2 minutes."): index = VectorStoreIndex.from_documents(docs) return index def validate_input(arxiv_id): try: paper = next(arxiv.Search(id_list=[arxiv_id]).results()) filename = paper.download_source() return filename except: return False def extract_file(filename, col1): with col1: placeholder = st.empty() st.session_state['imgs'] = [] try: Path("tmp").mkdir(parents=True, exist_ok=True) td = 'tmp' #print('temp dir', td) with tarfile.open(filename ,'r:gz') as tar: tar.extractall(td) preprocess_latex(td) basename, pdf_bytes = compile_pdf_return_bytes( sources_dir=td, host=texcompile_host, port=texcompile_port ) # compile the unmodified latex firstly with placeholder.container(): for page in pdfplumber.open(pdf_bytes).pages: image = page.to_image(resolution=300).original st.image(image) shapes, tokens = pdf_extract(pdf_bytes) ## get colors color_dict = ColorAnnotation() for rect in shapes: color_dict.add_existing_color(tup2str(rect['stroking_color'])) for token in tokens: color_dict.add_existing_color(token['color']) shutil.rmtree(td) Path("tmp").mkdir(parents=True, exist_ok=True) with tarfile.open(filename ,'r:gz') as tar: tar.extractall(td) tex_file = Path(find_latex_file(Path(basename).stem, basepath=td)).name annotate_file(tex_file, color_dict, latex_context=None, basepath=td) postprocess_latex(str(Path(find_latex_file(Path(basename).stem, basepath=td)))) basename, pdf_bytes_mod = compile_pdf_return_bytes( sources_dir=td, host=texcompile_host, port=texcompile_port ) # compile the modified latex placeholder.empty() with placeholder.container(): for page in pdfplumber.open(pdf_bytes_mod).pages: image = page.to_image(resolution=300).original st.image(image) shapes, tokens = pdf_extract(pdf_bytes_mod) df_toc, df_data = export_annotation(shapes, tokens, color_dict) shutil.rmtree(td) colors = { "Abstract":(255, 182, 193), "Author":(0, 0, 139), "Caption":(57, 230, 10), "Equation":(255, 0, 0),"Figure":(230, 51, 249),"Footer":(255, 255, 255), "List":(46, 33, 109),"Paragraph":(181, 196, 220),"Reference":(81, 142, 32), "Section":(24, 14, 248),"Table":(129, 252, 254),"Title":(97, 189, 251) } imgs = [] placeholder.empty() with placeholder.container(): for i, page in enumerate(pdfplumber.open(pdf_bytes).pages): image = page.to_image(resolution=300) for _, rect in df_data.iterrows(): if rect['page'] == i+1: color = colors.get(rect['label'], (0,0,0)) image.draw_rect((rect['x0'], rect['y0'], rect['x1'], rect['y1']), fill=(color[0],color[1],color[2],70), stroke=color, stroke_width=1) imgs.append(image.annotated) 
st.image(image.annotated) st.session_state['imgs'] = imgs return df_toc, df_data except Exception as e: raise e #st.error("LaTeX code parsing error, please follow LaTeX Rainbow's example to add new parsing rules.") return None, None def set_local_llm(): port = 9997 # replace with your endpoint port number client = RESTfulClient(f"http://localhost:{port}") # Download and Launch a model, this may take a while the first time model_uid = client.launch_model( model_name="llama-2-chat", model_size_in_billions=7, model_format="pytorch", quantization="none", ) # Initiate Xinference object to use the LLM llm = Xinference( endpoint=f"http://localhost:{port}", model_uid=model_uid, temperature=0.5, max_tokens=512, ) service_context = ServiceContext.from_defaults( llm=llm, embed_model="local:BAAI/bge-small-en" ) set_global_service_context(service_context) def set_openai_llm(): service_context = ServiceContext.from_defaults(llm=OpenAI(model="gpt-3.5-turbo", temperature=0.5, system_prompt="You are an expert on the paper and your job is to answer technical questions. Keep your answers precise and based on facts – do not hallucinate features.")) set_global_service_context(service_context) if __name__ == '__main__': main()
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.schema.Document", "llama_index.ServiceContext.from_defaults", "llama_index.llms.OpenAI", "llama_index.set_global_service_context", "llama_index.llms.Xinference" ]
[((1035, 1158), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with arXiv paper without PDF noise, powered by LaTeX Rainbow."""', 'layout': '"""wide"""'}), "(page_title=\n 'Chat with arXiv paper without PDF noise, powered by LaTeX Rainbow.',\n layout='wide')\n", (1053, 1158), True, 'import streamlit as st\n'), ((4713, 4749), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(True)'}), '(show_spinner=True)\n', (4730, 4749), True, 'import streamlit as st\n'), ((1338, 1390), 'streamlit.title', 'st.title', (['"""Chat with arXiv paper, without PDF noise"""'], {}), "('Chat with arXiv paper, without PDF noise')\n", (1346, 1390), True, 'import streamlit as st\n'), ((1395, 1502), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""# Github link: [LaTeX Rainbow](https://github.com/InsightsNet/texannotate)"""'], {}), "(\n '# Github link: [LaTeX Rainbow](https://github.com/InsightsNet/texannotate)'\n )\n", (1414, 1502), True, 'import streamlit as st\n'), ((1497, 1783), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""<small>It\'s always good practice to verify that a website is safe before giving it your API key. \n This site is open source, so you can check the code yourself, or run the streamlit app locally.</small>"""'], {'unsafe_allow_html': '(True)'}), '(\n """<small>It\'s always good practice to verify that a website is safe before giving it your API key. \n This site is open source, so you can check the code yourself, or run the streamlit app locally.</small>"""\n , unsafe_allow_html=True)\n', (1516, 1783), True, 'import streamlit as st\n'), ((1791, 1825), 'streamlit.columns', 'st.columns', (['[1, 0.8]'], {'gap': '"""medium"""'}), "([1, 0.8], gap='medium')\n", (1801, 1825), True, 'import streamlit as st\n'), ((4781, 4814), 'pandas.read_csv', 'pd.read_csv', (['"""data.csv"""'], {'sep': '"""\t"""'}), "('data.csv', sep='\\t')\n", (4792, 4814), True, 'import pandas as pd\n'), ((9288, 9329), 'xinference.client.RESTfulClient', 'RESTfulClient', (['f"""http://localhost:{port}"""'], {}), "(f'http://localhost:{port}')\n", (9301, 9329), False, 'from xinference.client import RESTfulClient\n'), ((9635, 9740), 'llama_index.llms.Xinference', 'Xinference', ([], {'endpoint': 'f"""http://localhost:{port}"""', 'model_uid': 'model_uid', 'temperature': '(0.5)', 'max_tokens': '(512)'}), "(endpoint=f'http://localhost:{port}', model_uid=model_uid,\n temperature=0.5, max_tokens=512)\n", (9645, 9740), False, 'from llama_index.llms import OpenAI, Xinference\n'), ((9798, 9874), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local:BAAI/bge-small-en"""'}), "(llm=llm, embed_model='local:BAAI/bge-small-en')\n", (9826, 9874), False, 'from llama_index import KeywordTableIndex, KnowledgeGraphIndex, ServiceContext, SimpleDirectoryReader, SummaryIndex, TreeIndex, VectorStoreIndex, download_loader, set_global_service_context\n'), ((9893, 9936), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (9919, 9936), False, 'from llama_index import KeywordTableIndex, KnowledgeGraphIndex, ServiceContext, SimpleDirectoryReader, SummaryIndex, TreeIndex, VectorStoreIndex, download_loader, set_global_service_context\n'), ((10239, 10282), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (10265, 10282), False, 'from llama_index import KeywordTableIndex, 
KnowledgeGraphIndex, ServiceContext, SimpleDirectoryReader, SummaryIndex, TreeIndex, VectorStoreIndex, download_loader, set_global_service_context\n'), ((4566, 4719), 'streamlit.info', 'st.info', (['"""Now you get a cleaned PDF. Only colored part are penned by paper author. Extracted text are resorted by the reading order."""'], {'icon': '"""📃"""'}), "(\n 'Now you get a cleaned PDF. Only colored part are penned by paper author. Extracted text are resorted by the reading order.'\n , icon='📃')\n", (4573, 4719), True, 'import streamlit as st\n'), ((5226, 5248), 'llama_index.schema.Document', 'Document', ([], {'text': 'section'}), '(text=section)\n', (5234, 5248), False, 'from llama_index.schema import Document\n'), ((5283, 5386), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""Loading and indexing the paper - hang tight! This should take 1-2 minutes."""'}), "(text=\n 'Loading and indexing the paper - hang tight! This should take 1-2 minutes.'\n )\n", (5293, 5386), True, 'import streamlit as st\n'), ((5394, 5431), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {}), '(docs)\n', (5425, 5431), False, 'from llama_index import KeywordTableIndex, KnowledgeGraphIndex, ServiceContext, SimpleDirectoryReader, SummaryIndex, TreeIndex, VectorStoreIndex, download_loader, set_global_service_context\n'), ((5739, 5749), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (5747, 5749), True, 'import streamlit as st\n'), ((1854, 1872), 'streamlit.form', 'st.form', (['"""my_form"""'], {}), "('my_form')\n", (1861, 1872), True, 'import streamlit as st\n'), ((1896, 1956), 'streamlit.text_input', 'st.text_input', (['"""Enter OpenAI API key here."""'], {'type': '"""password"""'}), "('Enter OpenAI API key here.', type='password')\n", (1909, 1956), True, 'import streamlit as st\n'), ((1980, 2047), 'streamlit.text_input', 'st.text_input', (['"""Please enter a arXiv paper id:"""'], {'value': '"""1601.00978"""'}), "('Please enter a arXiv paper id:', value='1601.00978')\n", (1993, 2047), True, 'import streamlit as st\n'), ((2072, 2149), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Submit and process arXiv paper (click once and wait)"""'], {}), "('Submit and process arXiv paper (click once and wait)')\n", (2093, 2149), True, 'import streamlit as st\n'), ((2388, 2411), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (2409, 2411), True, 'import streamlit as st\n'), ((2674, 2697), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (2695, 2697), True, 'import streamlit as st\n'), ((2902, 2932), 'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (2915, 2932), True, 'import streamlit as st\n'), ((2995, 3064), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (3027, 3064), True, 'import streamlit as st\n'), ((3858, 3897), 'streamlit.spinner', 'st.spinner', (['"""Downloading LaTeX code..."""'], {}), "('Downloading LaTeX code...')\n", (3868, 3897), True, 'import streamlit as st\n'), ((4105, 4158), 'streamlit.spinner', 'st.spinner', (['"""Annotating LaTeX code... please wait..."""'], {}), "('Annotating LaTeX code... 
please wait...')\n", (4115, 4158), True, 'import streamlit as st\n'), ((4282, 4310), 'streamlit.spinner', 'st.spinner', (['"""Loading llm..."""'], {}), "('Loading llm...')\n", (4292, 4310), True, 'import streamlit as st\n'), ((6081, 6170), 'texcompile.client.compile_pdf_return_bytes', 'compile_pdf_return_bytes', ([], {'sources_dir': 'td', 'host': 'texcompile_host', 'port': 'texcompile_port'}), '(sources_dir=td, host=texcompile_host, port=\n texcompile_port)\n', (6105, 6170), False, 'from texcompile.client import compile_pdf_return_bytes\n'), ((6505, 6527), 'pdfextract.pdf_extract.pdf_extract', 'pdf_extract', (['pdf_bytes'], {}), '(pdf_bytes)\n', (6516, 6527), False, 'from pdfextract.pdf_extract import pdf_extract\n'), ((6579, 6596), 'texannotate.color_annotation.ColorAnnotation', 'ColorAnnotation', ([], {}), '()\n', (6594, 6596), False, 'from texannotate.color_annotation import ColorAnnotation\n'), ((6815, 6832), 'shutil.rmtree', 'shutil.rmtree', (['td'], {}), '(td)\n', (6828, 6832), False, 'import shutil\n'), ((7080, 7148), 'texannotate.annotate_file.annotate_file', 'annotate_file', (['tex_file', 'color_dict'], {'latex_context': 'None', 'basepath': 'td'}), '(tex_file, color_dict, latex_context=None, basepath=td)\n', (7093, 7148), False, 'from texannotate.annotate_file import annotate_file\n'), ((7279, 7368), 'texcompile.client.compile_pdf_return_bytes', 'compile_pdf_return_bytes', ([], {'sources_dir': 'td', 'host': 'texcompile_host', 'port': 'texcompile_port'}), '(sources_dir=td, host=texcompile_host, port=\n texcompile_port)\n', (7303, 7368), False, 'from texcompile.client import compile_pdf_return_bytes\n'), ((7728, 7754), 'pdfextract.pdf_extract.pdf_extract', 'pdf_extract', (['pdf_bytes_mod'], {}), '(pdf_bytes_mod)\n', (7739, 7754), False, 'from pdfextract.pdf_extract import pdf_extract\n'), ((7785, 7830), 'pdfextract.export_annotation.export_annotation', 'export_annotation', (['shapes', 'tokens', 'color_dict'], {}), '(shapes, tokens, color_dict)\n', (7802, 7830), False, 'from pdfextract.export_annotation import export_annotation\n'), ((7843, 7860), 'shutil.rmtree', 'shutil.rmtree', (['td'], {}), '(td)\n', (7856, 7860), False, 'import shutil\n'), ((10016, 10243), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'temperature': '(0.5)', 'system_prompt': '"""You are an expert on the paper and your job is to answer technical questions. Keep your answers precise and based on facts – do not hallucinate features."""'}), "(model='gpt-3.5-turbo', temperature=0.5, system_prompt=\n 'You are an expert on the paper and your job is to answer technical questions. 
Keep your answers precise and based on facts – do not hallucinate features.'\n )\n", (10022, 10243), False, 'from llama_index.llms import OpenAI, Xinference\n'), ((3175, 3207), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (3190, 3207), True, 'import streamlit as st\n'), ((3225, 3253), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (3233, 3253), True, 'import streamlit as st\n'), ((3410, 3438), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (3425, 3438), True, 'import streamlit as st\n'), ((3992, 4067), 'streamlit.error', 'st.error', (['"""id not found on arXiv, or the paper doesn\'t contain LaTeX code."""'], {}), '("id not found on arXiv, or the paper doesn\'t contain LaTeX code.")\n', (4000, 4067), True, 'import streamlit as st\n'), ((4358, 4397), 'streamlit.error', 'st.error', (['"""Please set your OpenAI key."""'], {}), "('Please set your OpenAI key.')\n", (4366, 4397), True, 'import streamlit as st\n'), ((5935, 5965), 'tarfile.open', 'tarfile.open', (['filename', '"""r:gz"""'], {}), "(filename, 'r:gz')\n", (5947, 5965), False, 'import tarfile\n'), ((6025, 6045), 'utils.utils.preprocess_latex', 'preprocess_latex', (['td'], {}), '(td)\n', (6041, 6045), False, 'from utils.utils import find_latex_file, postprocess_latex, preprocess_latex, tup2str\n'), ((6910, 6940), 'tarfile.open', 'tarfile.open', (['filename', '"""r:gz"""'], {}), "(filename, 'r:gz')\n", (6922, 6940), False, 'import tarfile\n'), ((2521, 2534), 'streamlit.image', 'st.image', (['img'], {}), '(img)\n', (2529, 2534), True, 'import streamlit as st\n'), ((3461, 3486), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (3471, 3486), True, 'import streamlit as st\n'), ((3564, 3591), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (3572, 3591), True, 'import streamlit as st\n'), ((3694, 3735), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (3726, 3735), True, 'import streamlit as st\n'), ((5519, 5551), 'arxiv.Search', 'arxiv.Search', ([], {'id_list': '[arxiv_id]'}), '(id_list=[arxiv_id])\n', (5531, 5551), False, 'import arxiv\n'), ((5813, 5824), 'pathlib.Path', 'Path', (['"""tmp"""'], {}), "('tmp')\n", (5817, 5824), False, 'from pathlib import Path\n'), ((6338, 6364), 'pdfplumber.open', 'pdfplumber.open', (['pdf_bytes'], {}), '(pdf_bytes)\n', (6353, 6364), False, 'import pdfplumber\n'), ((6459, 6474), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (6467, 6474), True, 'import streamlit as st\n'), ((6675, 6706), 'utils.utils.tup2str', 'tup2str', (["rect['stroking_color']"], {}), "(rect['stroking_color'])\n", (6682, 6706), False, 'from utils.utils import find_latex_file, postprocess_latex, preprocess_latex, tup2str\n'), ((6845, 6856), 'pathlib.Path', 'Path', (['"""tmp"""'], {}), "('tmp')\n", (6849, 6856), False, 'from pathlib import Path\n'), ((7558, 7588), 'pdfplumber.open', 'pdfplumber.open', (['pdf_bytes_mod'], {}), '(pdf_bytes_mod)\n', (7573, 7588), False, 'import pdfplumber\n'), ((7683, 7698), 'streamlit.image', 'st.image', (['image'], {}), '(image)\n', (7691, 7698), True, 'import streamlit as st\n'), ((8893, 8918), 'streamlit.image', 'st.image', (['image.annotated'], {}), '(image.annotated)\n', (8901, 8918), True, 'import streamlit as st\n'), ((8395, 8421), 'pdfplumber.open', 'pdfplumber.open', (['pdf_bytes'], {}), '(pdf_bytes)\n', (8410, 8421), 
False, 'import pdfplumber\n'), ((7028, 7042), 'pathlib.Path', 'Path', (['basename'], {}), '(basename)\n', (7032, 7042), False, 'from pathlib import Path\n'), ((7204, 7218), 'pathlib.Path', 'Path', (['basename'], {}), '(basename)\n', (7208, 7218), False, 'from pathlib import Path\n')]
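The indexing core of the app above, isolated from the Streamlit and PDF-annotation machinery: one llama_index Document per extracted section, then a vector index and a condense_question chat engine. The sections list stands in for the reading-order text rebuilt from data.csv:

from llama_index import VectorStoreIndex
from llama_index.schema import Document

sections = ["Abstract ...", "1 Introduction ...", "2 Method ..."]  # placeholder section texts
index = VectorStoreIndex.from_documents([Document(text=s) for s in sections])
chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True)
print(chat_engine.chat("What is the main contribution?").response)  # placeholder question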
"""Global eval handlers.""" from typing import Any from llama_index.legacy.callbacks.argilla_callback import argilla_callback_handler from llama_index.legacy.callbacks.arize_phoenix_callback import ( arize_phoenix_callback_handler, ) from llama_index.legacy.callbacks.base_handler import BaseCallbackHandler from llama_index.legacy.callbacks.deepeval_callback import deepeval_callback_handler from llama_index.legacy.callbacks.honeyhive_callback import honeyhive_callback_handler from llama_index.legacy.callbacks.open_inference_callback import ( OpenInferenceCallbackHandler, ) from llama_index.legacy.callbacks.promptlayer_handler import PromptLayerHandler from llama_index.legacy.callbacks.simple_llm_handler import SimpleLLMHandler from llama_index.legacy.callbacks.wandb_callback import WandbCallbackHandler def set_global_handler(eval_mode: str, **eval_params: Any) -> None: """Set global eval handlers.""" import llama_index.legacy llama_index.legacy.global_handler = create_global_handler(eval_mode, **eval_params) def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler: """Get global eval handler.""" if eval_mode == "wandb": handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params) elif eval_mode == "openinference": handler = OpenInferenceCallbackHandler(**eval_params) elif eval_mode == "arize_phoenix": handler = arize_phoenix_callback_handler(**eval_params) elif eval_mode == "honeyhive": handler = honeyhive_callback_handler(**eval_params) elif eval_mode == "promptlayer": handler = PromptLayerHandler(**eval_params) elif eval_mode == "deepeval": handler = deepeval_callback_handler(**eval_params) elif eval_mode == "simple": handler = SimpleLLMHandler(**eval_params) elif eval_mode == "argilla": handler = argilla_callback_handler(**eval_params) else: raise ValueError(f"Eval mode {eval_mode} not supported.") return handler
[ "llama_index.legacy.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler", "llama_index.legacy.callbacks.promptlayer_handler.PromptLayerHandler", "llama_index.legacy.callbacks.simple_llm_handler.SimpleLLMHandler", "llama_index.legacy.callbacks.argilla_callback.argilla_callback_handler", "llama_index.legacy.callbacks.wandb_callback.WandbCallbackHandler", "llama_index.legacy.callbacks.deepeval_callback.deepeval_callback_handler", "llama_index.legacy.callbacks.honeyhive_callback.honeyhive_callback_handler", "llama_index.legacy.callbacks.open_inference_callback.OpenInferenceCallbackHandler" ]
[((1239, 1274), 'llama_index.legacy.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1259, 1274), False, 'from llama_index.legacy.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1332, 1375), 'llama_index.legacy.callbacks.open_inference_callback.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1360, 1375), False, 'from llama_index.legacy.callbacks.open_inference_callback import OpenInferenceCallbackHandler\n'), ((1433, 1478), 'llama_index.legacy.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1463, 1478), False, 'from llama_index.legacy.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler\n'), ((1532, 1573), 'llama_index.legacy.callbacks.honeyhive_callback.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (1558, 1573), False, 'from llama_index.legacy.callbacks.honeyhive_callback import honeyhive_callback_handler\n'), ((1629, 1662), 'llama_index.legacy.callbacks.promptlayer_handler.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (1647, 1662), False, 'from llama_index.legacy.callbacks.promptlayer_handler import PromptLayerHandler\n'), ((1715, 1755), 'llama_index.legacy.callbacks.deepeval_callback.deepeval_callback_handler', 'deepeval_callback_handler', ([], {}), '(**eval_params)\n', (1740, 1755), False, 'from llama_index.legacy.callbacks.deepeval_callback import deepeval_callback_handler\n'), ((1806, 1837), 'llama_index.legacy.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (1822, 1837), False, 'from llama_index.legacy.callbacks.simple_llm_handler import SimpleLLMHandler\n'), ((1889, 1928), 'llama_index.legacy.callbacks.argilla_callback.argilla_callback_handler', 'argilla_callback_handler', ([], {}), '(**eval_params)\n', (1913, 1928), False, 'from llama_index.legacy.callbacks.argilla_callback import argilla_callback_handler\n')]
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Callable, Coroutine

from langchain.llms.base import BaseLLM

from nemoguardrails import LLMRails, RailsConfig

COLANG_CONFIG = """
define user express greeting
  "hi"

define user express ill intent
  "I hate you"
  "I want to destroy the world"

define bot express cannot respond
  "I'm sorry I cannot help you with that."

define user express question
  "What is the current unemployment rate?"

# Basic guardrail example
define flow
  user express ill intent
  bot express cannot respond

# Question answering flow
define flow
  user ...
  $answer = execute llama_index_query(query=$last_user_message)
  bot $answer
"""

YAML_CONFIG = """
models:
  - type: main
    engine: openai
    model: gpt-3.5-turbo-instruct
"""


def demo():
    try:
        import llama_index
        from llama_index.indices.query.base import BaseQueryEngine
        from llama_index.response.schema import StreamingResponse
    except ImportError:
        raise ImportError(
            "Could not import llama_index, please install it with "
            "`pip install llama_index`."
        )

    config = RailsConfig.from_content(COLANG_CONFIG, YAML_CONFIG)
    app = LLMRails(config)

    def _get_llama_index_query_engine(llm: BaseLLM):
        docs = llama_index.SimpleDirectoryReader(
            input_files=["../examples/bots/abc/kb/employee-handbook.md"]
        ).load_data()
        llm_predictor = llama_index.LLMPredictor(llm=llm)
        index = llama_index.GPTVectorStoreIndex.from_documents(
            docs, llm_predictor=llm_predictor
        )
        default_query_engine = index.as_query_engine()
        return default_query_engine

    def _get_callable_query_engine(
        query_engine: BaseQueryEngine,
    ) -> Callable[[str], Coroutine[Any, Any, str]]:
        async def get_query_response(query: str) -> str:
            response = query_engine.query(query)
            if isinstance(response, StreamingResponse):
                typed_response = response.get_response()
            else:
                typed_response = response
            response_str = typed_response.response
            if response_str is None:
                return ""
            return response_str

        return get_query_response

    query_engine = _get_llama_index_query_engine(app.llm)
    app.register_action(
        _get_callable_query_engine(query_engine), name="llama_index_query"
    )

    history = [{"role": "user", "content": "How many vacation days do I get?"}]
    result = app.generate(messages=history)
    print(result)


if __name__ == "__main__":
    demo()
[ "llama_index.GPTVectorStoreIndex.from_documents", "llama_index.LLMPredictor", "llama_index.SimpleDirectoryReader" ]
[((1797, 1849), 'nemoguardrails.RailsConfig.from_content', 'RailsConfig.from_content', (['COLANG_CONFIG', 'YAML_CONFIG'], {}), '(COLANG_CONFIG, YAML_CONFIG)\n', (1821, 1849), False, 'from nemoguardrails import LLMRails, RailsConfig\n'), ((1860, 1876), 'nemoguardrails.LLMRails', 'LLMRails', (['config'], {}), '(config)\n', (1868, 1876), False, 'from nemoguardrails import LLMRails, RailsConfig\n'), ((2100, 2133), 'llama_index.LLMPredictor', 'llama_index.LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (2124, 2133), False, 'import llama_index\n'), ((2150, 2236), 'llama_index.GPTVectorStoreIndex.from_documents', 'llama_index.GPTVectorStoreIndex.from_documents', (['docs'], {'llm_predictor': 'llm_predictor'}), '(docs, llm_predictor=\n llm_predictor)\n', (2196, 2236), False, 'import llama_index\n'), ((1946, 2046), 'llama_index.SimpleDirectoryReader', 'llama_index.SimpleDirectoryReader', ([], {'input_files': "['../examples/bots/abc/kb/employee-handbook.md']"}), "(input_files=[\n '../examples/bots/abc/kb/employee-handbook.md'])\n", (1979, 2046), False, 'import llama_index\n')]
import sys
from langchain import OpenAI
from pathlib import Path
import llama_index as li
#from llamahub.connectors import TextFileConnector
from llama_index import SimpleDirectoryReader,GPTListIndex,LLMPredictor

file_name = sys.argv[1]

llm_predictor = LLMPredictor(llm=OpenAI(model_name="gpt-3.5-turbo")) #temperature=0,

docs = SimpleDirectoryReader('.', [file_name]).load_data()
index = GPTListIndex(docs)

ex = """Today we finish off our study of collaborative filtering by looking closely at embeddings—a critical building block of many deep learning algorithms. Then we’ll dive into convolutional neural networks (CNNs) and see how they really work. We’ve used plenty of CNNs through this course, but we haven’t peeked inside them to see what’s really going on in there. As well as learning about their most fundamental building block, the convolution, we’ll also look at pooling, dropout, and more."""

q = f"""Here's an example of a lesson summary from a previous fast.ai lesson:

"{ex}"

Write a four paragraph summary of the fast.ai lesson contained in the following transcript, using a similar informal writing style to the above summary from the previous lesson."""

summary = index.query(q, response_mode="tree_summarize", llm_predictor=llm_predictor)

Path(f'{Path(file_name).stem}-summ.txt').write_text(str(summary))
[ "llama_index.GPTListIndex", "llama_index.SimpleDirectoryReader" ]
[((391, 409), 'llama_index.GPTListIndex', 'GPTListIndex', (['docs'], {}), '(docs)\n', (403, 409), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, LLMPredictor\n'), ((271, 305), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""'}), "(model_name='gpt-3.5-turbo')\n", (277, 305), False, 'from langchain import OpenAI\n'), ((331, 370), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""."""', '[file_name]'], {}), "('.', [file_name])\n", (352, 370), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, LLMPredictor\n'), ((1270, 1285), 'pathlib.Path', 'Path', (['file_name'], {}), '(file_name)\n', (1274, 1285), False, 'from pathlib import Path\n')]
# Copyright © 2024 Pathway """ Pathway vector search server and client. The server reads source documents and build a vector index over them, then starts serving HTTP requests. The client queries the server and returns matching documents. """ import asyncio import functools import json import logging import threading from collections.abc import Callable, Coroutine from typing import TYPE_CHECKING import jmespath import numpy as np import requests import pathway as pw import pathway.xpacks.llm.parsers import pathway.xpacks.llm.splitters from pathway.stdlib.ml import index from pathway.stdlib.ml.classifiers import _knn_lsh if TYPE_CHECKING: import langchain_core.documents import langchain_core.embeddings import llama_index.core.schema def _unwrap_udf(func): if isinstance(func, pw.UDF): return func.__wrapped__ return func # https://stackoverflow.com/a/75094151 class _RunThread(threading.Thread): def __init__(self, coroutine): self.coroutine = coroutine self.result = None super().__init__() def run(self): self.result = asyncio.run(self.coroutine) def _run_async(coroutine): try: loop = asyncio.get_running_loop() except RuntimeError: loop = None if loop and loop.is_running(): thread = _RunThread(coroutine) thread.start() thread.join() return thread.result else: return asyncio.run(coroutine) def _coerce_sync(func: Callable) -> Callable: if asyncio.iscoroutinefunction(func): @functools.wraps(func) def wrapper(*args, **kwargs): return _run_async(func(*args, **kwargs)) return wrapper else: return func class VectorStoreServer: """ Builds a document indexing pipeline and starts an HTTP REST server for nearest neighbors queries. Args: - docs: pathway tables typically coming out of connectors which contain source documents. - embedder: callable that embeds a single document - parser: callable that parses file contents into a list of documents - splitter: callable that splits long documents """ def __init__( self, *docs: pw.Table, embedder: Callable[[str], list[float] | Coroutine], parser: Callable[[bytes], list[tuple[str, dict]]] | None = None, splitter: Callable[[str], list[tuple[str, dict]]] | None = None, doc_post_processors: ( list[Callable[[str, dict], tuple[str, dict]]] | None ) = None, index_params: dict | None = None, ): self.docs = docs self.parser: Callable[[bytes], list[tuple[str, dict]]] = _unwrap_udf( parser if parser is not None else pathway.xpacks.llm.parsers.ParseUtf8() ) self.doc_post_processors = [] if doc_post_processors: self.doc_post_processors = [ _unwrap_udf(processor) for processor in doc_post_processors if processor is not None ] self.splitter = _unwrap_udf( splitter if splitter is not None else pathway.xpacks.llm.splitters.null_splitter ) self.embedder = _unwrap_udf(embedder) # detect the dimensionality of the embeddings self.embedding_dimension = len(_coerce_sync(self.embedder)(".")) logging.debug("Embedder has dimension %s", self.embedding_dimension) DEFAULT_INDEX_PARAMS = dict(distance_type="cosine") if index_params is not None: DEFAULT_INDEX_PARAMS.update(index_params) self.index_params = DEFAULT_INDEX_PARAMS self._graph = self._build_graph() @classmethod def from_langchain_components( cls, *docs, embedder: "langchain_core.embeddings.Embeddings", parser: Callable[[bytes], list[tuple[str, dict]]] | None = None, splitter: "langchain_core.documents.BaseDocumentTransformer | None" = None, **kwargs, ): """ Initializes VectorStoreServer by using LangChain components. 
Args: - docs: pathway tables typically coming out of connectors which contain source documents - embedder: Langchain component for embedding documents - parser: callable that parses file contents into a list of documents - splitter: Langchaing component for splitting documents into parts """ try: from langchain_core.documents import Document except ImportError: raise ImportError( "Please install langchain_core: `pip install langchain_core`" ) generic_splitter = None if splitter: generic_splitter = lambda x: [ # noqa (doc.page_content, doc.metadata) for doc in splitter.transform_documents([Document(page_content=x)]) ] async def generic_embedded(x: str): res = await embedder.aembed_documents([x]) return res[0] return cls( *docs, embedder=generic_embedded, parser=parser, splitter=generic_splitter, **kwargs, ) @classmethod def from_llamaindex_components( cls, *docs, transformations: list["llama_index.core.schema.TransformComponent"], parser: Callable[[bytes], list[tuple[str, dict]]] | None = None, **kwargs, ): """ Initializes VectorStoreServer by using LlamaIndex TransformComponents. Args: - docs: pathway tables typically coming out of connectors which contain source documents - transformations: list of LlamaIndex components. The last component in this list is required to inherit from LlamaIndex `BaseEmbedding` - parser: callable that parses file contents into a list of documents """ try: from llama_index.core.base.embeddings.base import BaseEmbedding from llama_index.core.ingestion.pipeline import run_transformations from llama_index.core.schema import BaseNode, MetadataMode, TextNode except ImportError: raise ImportError( "Please install llama-index-core: `pip install llama-index-core`" ) try: from llama_index.legacy.embeddings.base import ( BaseEmbedding as LegacyBaseEmbedding, ) legacy_llama_index_not_imported = True except ImportError: legacy_llama_index_not_imported = False def node_transformer(x: str) -> list[BaseNode]: return [TextNode(text=x)] def node_to_pathway(x: list[BaseNode]) -> list[tuple[str, dict]]: return [ (node.get_content(metadata_mode=MetadataMode.NONE), node.extra_info) for node in x ] if transformations is None or not transformations: raise ValueError("Transformations list cannot be None or empty.") if not isinstance(transformations[-1], BaseEmbedding) and ( legacy_llama_index_not_imported or not isinstance(transformations[-1], LegacyBaseEmbedding) ): raise ValueError( f"Last step of transformations should be an instance of {BaseEmbedding.__name__}, " f"found {type(transformations[-1])}." ) embedder: BaseEmbedding = transformations.pop() async def embedding_callable(x: str) -> list[float]: embedding = await embedder.aget_text_embedding(x) return embedding def generic_transformer(x: str) -> list[tuple[str, dict]]: starting_node = node_transformer(x) final_node = run_transformations(starting_node, transformations) return node_to_pathway(final_node) return VectorStoreServer( *docs, embedder=embedding_callable, parser=parser, splitter=generic_transformer, **kwargs, ) def _build_graph(self) -> dict: """ Builds the pathway computation graph for indexing documents and serving queries. """ docs_s = self.docs if not docs_s: raise ValueError( """Please provide at least one data source, e.g. 
read files from disk: pw.io.fs.read('./sample_docs', format='binary', mode='static', with_metadata=True) """ ) if len(docs_s) == 1: (docs,) = docs_s else: docs: pw.Table = docs_s[0].concat_reindex(*docs_s[1:]) # type: ignore @pw.udf def parse_doc(data: bytes, metadata) -> list[pw.Json]: rets = self.parser(data) metadata = metadata.value return [dict(text=ret[0], metadata={**metadata, **ret[1]}) for ret in rets] # type: ignore parsed_docs = docs.select(data=parse_doc(docs.data, docs._metadata)).flatten( pw.this.data ) @pw.udf def post_proc_docs(data_json: pw.Json) -> pw.Json: data: dict = data_json.value # type:ignore text = data["text"] metadata = data["metadata"] for processor in self.doc_post_processors: text, metadata = processor(text, metadata) return dict(text=text, metadata=metadata) # type: ignore parsed_docs = parsed_docs.select(data=post_proc_docs(pw.this.data)) @pw.udf def split_doc(data_json: pw.Json) -> list[pw.Json]: data: dict = data_json.value # type:ignore text = data["text"] metadata = data["metadata"] rets = self.splitter(text) return [ dict(text=ret[0], metadata={**metadata, **ret[1]}) # type:ignore for ret in rets ] chunked_docs = parsed_docs.select(data=split_doc(pw.this.data)).flatten( pw.this.data ) if asyncio.iscoroutinefunction(self.embedder): @pw.udf async def embedder(txt): result = await self.embedder(txt) return np.asarray(result) else: @pw.udf def embedder(txt): result = self.embedder(txt) return np.asarray(result) chunked_docs += chunked_docs.select( embedding=embedder(pw.this.data["text"].as_str()) ) knn_index = index.KNNIndex( chunked_docs.embedding, chunked_docs, n_dimensions=self.embedding_dimension, metadata=chunked_docs.data["metadata"], **self.index_params, # type:ignore ) parsed_docs += parsed_docs.select( modified=pw.this.data["metadata"]["modified_at"].as_int(), indexed=pw.this.data["metadata"]["seen_at"].as_int(), path=pw.this.data["metadata"]["path"].as_str(), ) stats = parsed_docs.reduce( count=pw.reducers.count(), last_modified=pw.reducers.max(pw.this.modified), last_indexed=pw.reducers.max(pw.this.indexed), paths=pw.reducers.tuple(pw.this.path), ) return locals() class StatisticsQuerySchema(pw.Schema): pass class QueryResultSchema(pw.Schema): result: pw.Json class InputResultSchema(pw.Schema): result: list[pw.Json] @pw.table_transformer def statistics_query( self, info_queries: pw.Table[StatisticsQuerySchema] ) -> pw.Table[QueryResultSchema]: stats = self._graph["stats"] # VectorStore statistics computation @pw.udf def format_stats(counts, last_modified, last_indexed) -> pw.Json: if counts is not None: response = { "file_count": counts, "last_modified": last_modified, "last_indexed": last_indexed, } else: response = { "file_count": 0, "last_modified": None, "last_indexed": None, } return pw.Json(response) info_results = info_queries.join_left(stats, id=info_queries.id).select( result=format_stats(stats.count, stats.last_modified, stats.last_indexed) ) return info_results class FilterSchema(pw.Schema): metadata_filter: str | None = pw.column_definition( default_value=None, description="Metadata filter in JMESPath format" ) filepath_globpattern: str | None = pw.column_definition( default_value=None, description="An optional Glob pattern for the file path" ) InputsQuerySchema = FilterSchema @staticmethod def merge_filters(queries: pw.Table): @pw.udf def _get_jmespath_filter( metadata_filter: str, filepath_globpattern: str ) -> str | None: ret_parts = [] if metadata_filter: ret_parts.append(f"({metadata_filter})") if filepath_globpattern: 
ret_parts.append(f'globmatch(`"{filepath_globpattern}"`, path)') if ret_parts: return " && ".join(ret_parts) return None queries = queries.without( *VectorStoreServer.FilterSchema.__columns__.keys() ) + queries.select( metadata_filter=_get_jmespath_filter( pw.this.metadata_filter, pw.this.filepath_globpattern ) ) return queries @pw.table_transformer def inputs_query( self, input_queries: pw.Table[InputsQuerySchema] # type:ignore ) -> pw.Table[InputResultSchema]: docs = self._graph["docs"] # TODO: compare this approach to first joining queries to dicuments, then filtering, # then grouping to get each response. # The "dumb" tuple approach has more work precomputed for an all inputs query all_metas = docs.reduce(metadatas=pw.reducers.tuple(pw.this._metadata)) input_queries = self.merge_filters(input_queries) @pw.udf def format_inputs( metadatas: list[pw.Json] | None, metadata_filter: str | None ) -> list[pw.Json]: metadatas: list = metadatas if metadatas is not None else [] # type:ignore assert metadatas is not None if metadata_filter: metadatas = [ m for m in metadatas if jmespath.search( metadata_filter, m.value, options=_knn_lsh._glob_options ) ] return metadatas input_results = input_queries.join_left(all_metas, id=input_queries.id).select( all_metas.metadatas, input_queries.metadata_filter ) input_results = input_results.select( result=format_inputs(pw.this.metadatas, pw.this.metadata_filter) ) return input_results class RetrieveQuerySchema(pw.Schema): query: str = pw.column_definition( description="Your query for the similarity search", example="Pathway data processing framework", ) k: int = pw.column_definition( description="The number of documents to provide", example=2 ) metadata_filter: str | None = pw.column_definition( default_value=None, description="Metadata filter in JMESPath format" ) filepath_globpattern: str | None = pw.column_definition( default_value=None, description="An optional Glob pattern for the file path" ) @pw.table_transformer def retrieve_query( self, retrieval_queries: pw.Table[RetrieveQuerySchema] ) -> pw.Table[QueryResultSchema]: embedder = self._graph["embedder"] knn_index = self._graph["knn_index"] # Relevant document search retrieval_queries = self.merge_filters(retrieval_queries) retrieval_queries += retrieval_queries.select( embedding=embedder(pw.this.query), ) retrieval_results = retrieval_queries + knn_index.get_nearest_items( retrieval_queries.embedding, k=pw.this.k, collapse_rows=True, metadata_filter=retrieval_queries.metadata_filter, with_distances=True, ).select( result=pw.this.data, dist=pw.this.dist, ) retrieval_results = retrieval_results.select( result=pw.apply_with_type( lambda x, y: pw.Json( sorted( [{**res.value, "dist": dist} for res, dist in zip(x, y)], key=lambda x: x["dist"], # type: ignore ) ), pw.Json, pw.this.result, pw.this.dist, ) ) return retrieval_results def run_server( self, host, port, threaded: bool = False, with_cache: bool = True, cache_backend: ( pw.persistence.Backend | None ) = pw.persistence.Backend.filesystem("./Cache"), ): """ Builds the document processing pipeline and runs it. Args: - host: host to bind the HTTP listener - port: to bind the HTTP listener - threaded: if True, run in a thread. Else block computation - with_cache: if True, embedding requests for the same contents are cached - cache_backend: the backend to use for caching if it is enabled. The default is the disk cache, hosted locally in the folder ``./Cache``. 
You can use ``Backend`` class of the [`persistence API`](/developers/api-docs/persistence-api/#pathway.persistence.Backend) to override it. Returns: If threaded, return the Thread object. Else, does not return. """ webserver = pw.io.http.PathwayWebserver(host=host, port=port, with_cors=True) # TODO(move into webserver??) def serve(route, schema, handler, documentation): queries, writer = pw.io.http.rest_connector( webserver=webserver, route=route, methods=("GET", "POST"), schema=schema, autocommit_duration_ms=50, delete_completed_queries=True, documentation=documentation, ) writer(handler(queries)) serve( "/v1/retrieve", self.RetrieveQuerySchema, self.retrieve_query, pw.io.http.EndpointDocumentation( summary="Do a similarity search for your query", description="Request the given number of documents from the " "realtime-maintained index.", method_types=("GET",), ), ) serve( "/v1/statistics", self.StatisticsQuerySchema, self.statistics_query, pw.io.http.EndpointDocumentation( summary="Get current indexer stats", description="Request for the basic stats of the indexer process. " "It returns the number of documents that are currently present in the " "indexer and the time the last of them was added.", method_types=("GET",), ), ) serve( "/v1/inputs", self.InputsQuerySchema, self.inputs_query, pw.io.http.EndpointDocumentation( summary="Get indexed documents list", description="Request for the list of documents present in the indexer. " "It returns the list of metadata objects.", method_types=("GET",), ), ) def run(): if with_cache: if cache_backend is None: raise ValueError( "Cache usage was requested but the backend is unspecified" ) persistence_config = pw.persistence.Config.simple_config( cache_backend, persistence_mode=pw.PersistenceMode.UDF_CACHING, ) else: persistence_config = None pw.run( monitoring_level=pw.MonitoringLevel.NONE, persistence_config=persistence_config, ) if threaded: t = threading.Thread(target=run, name="VectorStoreServer") t.start() return t else: run() class VectorStoreClient: def __init__( self, host: str | None = None, port: int | None = None, url: str | None = None, timeout: int = 15, additional_headers: dict | None = None, ): """ A client you can use to query :py:class:`VectorStoreServer`. Please provide either the `url`, or `host` and `port`. Args: - host: host on which `:py:class:`VectorStoreServer` listens - port: port on which `:py:class:`VectorStoreServer` listens - url: url at which `:py:class:`VectorStoreServer` listens - timeout: timeout for the post requests in seconds """ err = "Either (`host` and `port`) or `url` must be provided, but not both." if url is not None: if host or port: raise ValueError(err) self.url = url else: if host is None: raise ValueError(err) port = port or 80 self.url = f"http://{host}:{port}" self.timeout = timeout self.additional_headers = additional_headers or {} def query( self, query: str, k: int = 3, metadata_filter: str | None = None ) -> list[dict]: """ Perform a query to the vector store and fetch results. Args: - query: - k: number of documents to be returned - metadata_filter: optional string representing the metadata filtering query in the JMESPath format. The search will happen only for documents satisfying this filtering. 
""" data = {"query": query, "k": k} if metadata_filter is not None: data["metadata_filter"] = metadata_filter url = self.url + "/v1/retrieve" response = requests.post( url, data=json.dumps(data), headers=self._get_request_headers(), timeout=self.timeout, ) responses = response.json() return sorted(responses, key=lambda x: x["dist"]) # Make an alias __call__ = query def get_vectorstore_statistics(self): """Fetch basic statistics about the vector store.""" url = self.url + "/v1/statistics" response = requests.post( url, json={}, headers=self._get_request_headers(), timeout=self.timeout, ) responses = response.json() return responses def get_input_files( self, metadata_filter: str | None = None, filepath_globpattern: str | None = None, ): """ Fetch information on documents in the the vector store. Args: metadata_filter: optional string representing the metadata filtering query in the JMESPath format. The search will happen only for documents satisfying this filtering. filepath_globpattern: optional glob pattern specifying which documents will be searched for this query. """ url = self.url + "/v1/inputs" response = requests.post( url, json={ "metadata_filter": metadata_filter, "filepath_globpattern": filepath_globpattern, }, headers=self._get_request_headers(), timeout=self.timeout, ) responses = response.json() return responses def _get_request_headers(self): request_headers = {"Content-Type": "application/json"} request_headers.update(self.additional_headers) return request_headers
[ "llama_index.core.schema.TextNode", "llama_index.core.ingestion.pipeline.run_transformations" ]
[((1515, 1548), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['func'], {}), '(func)\n', (1542, 1548), False, 'import asyncio\n'), ((1111, 1138), 'asyncio.run', 'asyncio.run', (['self.coroutine'], {}), '(self.coroutine)\n', (1122, 1138), False, 'import asyncio\n'), ((1192, 1218), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (1216, 1218), False, 'import asyncio\n'), ((1437, 1459), 'asyncio.run', 'asyncio.run', (['coroutine'], {}), '(coroutine)\n', (1448, 1459), False, 'import asyncio\n'), ((1560, 1581), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (1575, 1581), False, 'import functools\n'), ((3406, 3474), 'logging.debug', 'logging.debug', (['"""Embedder has dimension %s"""', 'self.embedding_dimension'], {}), "('Embedder has dimension %s', self.embedding_dimension)\n", (3419, 3474), False, 'import logging\n'), ((10102, 10144), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['self.embedder'], {}), '(self.embedder)\n', (10129, 10144), False, 'import asyncio\n'), ((10588, 10750), 'pathway.stdlib.ml.index.KNNIndex', 'index.KNNIndex', (['chunked_docs.embedding', 'chunked_docs'], {'n_dimensions': 'self.embedding_dimension', 'metadata': "chunked_docs.data['metadata']"}), "(chunked_docs.embedding, chunked_docs, n_dimensions=self.\n embedding_dimension, metadata=chunked_docs.data['metadata'], **self.\n index_params)\n", (10602, 10750), False, 'from pathway.stdlib.ml import index\n'), ((12607, 12702), 'pathway.column_definition', 'pw.column_definition', ([], {'default_value': 'None', 'description': '"""Metadata filter in JMESPath format"""'}), "(default_value=None, description=\n 'Metadata filter in JMESPath format')\n", (12627, 12702), True, 'import pathway as pw\n'), ((12763, 12866), 'pathway.column_definition', 'pw.column_definition', ([], {'default_value': 'None', 'description': '"""An optional Glob pattern for the file path"""'}), "(default_value=None, description=\n 'An optional Glob pattern for the file path')\n", (12783, 12866), True, 'import pathway as pw\n'), ((15276, 15397), 'pathway.column_definition', 'pw.column_definition', ([], {'description': '"""Your query for the similarity search"""', 'example': '"""Pathway data processing framework"""'}), "(description='Your query for the similarity search',\n example='Pathway data processing framework')\n", (15296, 15397), True, 'import pathway as pw\n'), ((15446, 15531), 'pathway.column_definition', 'pw.column_definition', ([], {'description': '"""The number of documents to provide"""', 'example': '(2)'}), "(description='The number of documents to provide',\n example=2)\n", (15466, 15531), True, 'import pathway as pw\n'), ((15588, 15683), 'pathway.column_definition', 'pw.column_definition', ([], {'default_value': 'None', 'description': '"""Metadata filter in JMESPath format"""'}), "(default_value=None, description=\n 'Metadata filter in JMESPath format')\n", (15608, 15683), True, 'import pathway as pw\n'), ((15744, 15847), 'pathway.column_definition', 'pw.column_definition', ([], {'default_value': 'None', 'description': '"""An optional Glob pattern for the file path"""'}), "(default_value=None, description=\n 'An optional Glob pattern for the file path')\n", (15764, 15847), True, 'import pathway as pw\n'), ((17383, 17427), 'pathway.persistence.Backend.filesystem', 'pw.persistence.Backend.filesystem', (['"""./Cache"""'], {}), "('./Cache')\n", (17416, 17427), True, 'import pathway as pw\n'), ((18253, 18318), 'pathway.io.http.PathwayWebserver', 
'pw.io.http.PathwayWebserver', ([], {'host': 'host', 'port': 'port', 'with_cors': '(True)'}), '(host=host, port=port, with_cors=True)\n', (18280, 18318), True, 'import pathway as pw\n'), ((7862, 7913), 'llama_index.core.ingestion.pipeline.run_transformations', 'run_transformations', (['starting_node', 'transformations'], {}), '(starting_node, transformations)\n', (7881, 7913), False, 'from llama_index.core.ingestion.pipeline import run_transformations\n'), ((12309, 12326), 'pathway.Json', 'pw.Json', (['response'], {}), '(response)\n', (12316, 12326), True, 'import pathway as pw\n'), ((18446, 18640), 'pathway.io.http.rest_connector', 'pw.io.http.rest_connector', ([], {'webserver': 'webserver', 'route': 'route', 'methods': "('GET', 'POST')", 'schema': 'schema', 'autocommit_duration_ms': '(50)', 'delete_completed_queries': '(True)', 'documentation': 'documentation'}), "(webserver=webserver, route=route, methods=('GET',\n 'POST'), schema=schema, autocommit_duration_ms=50,\n delete_completed_queries=True, documentation=documentation)\n", (18471, 18640), True, 'import pathway as pw\n'), ((18924, 19132), 'pathway.io.http.EndpointDocumentation', 'pw.io.http.EndpointDocumentation', ([], {'summary': '"""Do a similarity search for your query"""', 'description': '"""Request the given number of documents from the realtime-maintained index."""', 'method_types': "('GET',)"}), "(summary=\n 'Do a similarity search for your query', description=\n 'Request the given number of documents from the realtime-maintained index.'\n , method_types=('GET',))\n", (18956, 19132), True, 'import pathway as pw\n'), ((19343, 19634), 'pathway.io.http.EndpointDocumentation', 'pw.io.http.EndpointDocumentation', ([], {'summary': '"""Get current indexer stats"""', 'description': '"""Request for the basic stats of the indexer process. It returns the number of documents that are currently present in the indexer and the time the last of them was added."""', 'method_types': "('GET',)"}), "(summary='Get current indexer stats',\n description=\n 'Request for the basic stats of the indexer process. It returns the number of documents that are currently present in the indexer and the time the last of them was added.'\n , method_types=('GET',))\n", (19375, 19634), True, 'import pathway as pw\n'), ((19853, 20074), 'pathway.io.http.EndpointDocumentation', 'pw.io.http.EndpointDocumentation', ([], {'summary': '"""Get indexed documents list"""', 'description': '"""Request for the list of documents present in the indexer. It returns the list of metadata objects."""', 'method_types': "('GET',)"}), "(summary='Get indexed documents list',\n description=\n 'Request for the list of documents present in the indexer. 
It returns the list of metadata objects.'\n , method_types=('GET',))\n", (19885, 20074), True, 'import pathway as pw\n'), ((20655, 20747), 'pathway.run', 'pw.run', ([], {'monitoring_level': 'pw.MonitoringLevel.NONE', 'persistence_config': 'persistence_config'}), '(monitoring_level=pw.MonitoringLevel.NONE, persistence_config=\n persistence_config)\n', (20661, 20747), True, 'import pathway as pw\n'), ((20828, 20882), 'threading.Thread', 'threading.Thread', ([], {'target': 'run', 'name': '"""VectorStoreServer"""'}), "(target=run, name='VectorStoreServer')\n", (20844, 20882), False, 'import threading\n'), ((6736, 6752), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'x'}), '(text=x)\n', (6744, 6752), False, 'from llama_index.core.schema import BaseNode, MetadataMode, TextNode\n'), ((10277, 10295), 'numpy.asarray', 'np.asarray', (['result'], {}), '(result)\n', (10287, 10295), True, 'import numpy as np\n'), ((10430, 10448), 'numpy.asarray', 'np.asarray', (['result'], {}), '(result)\n', (10440, 10448), True, 'import numpy as np\n'), ((11133, 11152), 'pathway.reducers.count', 'pw.reducers.count', ([], {}), '()\n', (11150, 11152), True, 'import pathway as pw\n'), ((11180, 11213), 'pathway.reducers.max', 'pw.reducers.max', (['pw.this.modified'], {}), '(pw.this.modified)\n', (11195, 11213), True, 'import pathway as pw\n'), ((11240, 11272), 'pathway.reducers.max', 'pw.reducers.max', (['pw.this.indexed'], {}), '(pw.this.indexed)\n', (11255, 11272), True, 'import pathway as pw\n'), ((11292, 11323), 'pathway.reducers.tuple', 'pw.reducers.tuple', (['pw.this.path'], {}), '(pw.this.path)\n', (11309, 11323), True, 'import pathway as pw\n'), ((14203, 14239), 'pathway.reducers.tuple', 'pw.reducers.tuple', (['pw.this._metadata'], {}), '(pw.this._metadata)\n', (14220, 14239), True, 'import pathway as pw\n'), ((20423, 20527), 'pathway.persistence.Config.simple_config', 'pw.persistence.Config.simple_config', (['cache_backend'], {'persistence_mode': 'pw.PersistenceMode.UDF_CACHING'}), '(cache_backend, persistence_mode=pw.\n PersistenceMode.UDF_CACHING)\n', (20458, 20527), True, 'import pathway as pw\n'), ((22847, 22863), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (22857, 22863), False, 'import json\n'), ((14720, 14793), 'jmespath.search', 'jmespath.search', (['metadata_filter', 'm.value'], {'options': '_knn_lsh._glob_options'}), '(metadata_filter, m.value, options=_knn_lsh._glob_options)\n', (14735, 14793), False, 'import jmespath\n'), ((4913, 4937), 'langchain_core.documents.Document', 'Document', ([], {'page_content': 'x'}), '(page_content=x)\n', (4921, 4937), False, 'from langchain_core.documents import Document\n')]
# Imports
from collections import defaultdict
from time import sleep
from llama_index import (
    StorageContext,
    load_index_from_storage,
    set_global_service_context,
)
from model_context import get_anyscale_context
from templates import custom_template, yn_template
import csv
from tqdm import tqdm

from openai import OpenAI

client = OpenAI(base_url="https://api.endpoints.anyscale.com/v1", api_key="KEY")

# DEBUG LOGS
# import llama_index
# llama_index.set_global_handler("simple")

rag = True
yn = False

if rag:
    # Select Model
    print("Loading model context...")
    service_context = get_anyscale_context()
    set_global_service_context(service_context)

    # Load embedded data for RAG
    print("Loading RAG embeddings...")
    storage_context = StorageContext.from_defaults(persist_dir="vector-db-all")
    index = load_index_from_storage(
        service_context=service_context, storage_context=storage_context
    )

    # Assemble Query Engine
    top_k = 3
    if yn:
        query_engine = index.as_query_engine(
            text_qa_template=yn_template,
            similarity_top_k=top_k,
            # verbose=True,
            # streaming=True,
        )
    else:
        query_engine = index.as_query_engine(
            text_qa_template=custom_template,
            similarity_top_k=top_k,
            # verbose=True,
            # streaming=True,
        )


def query_baseline(text: str, yn: bool) -> str:
    while True:
        if yn:
            content_msg = "Answer with yes/no and an explanation."
        else:
            content_msg = "Express whether the statement is true or false and explain why."  #Your job is to
        try:
            chat_completion = client.chat.completions.create(
                model="meta-llama/Llama-2-7b-chat-hf",
                messages=[
                    {
                        "role": "system",
                        "content": content_msg,
                    },
                    {
                        "role": "user",
                        "content": text,
                    },
                ],
                temperature=0,
            )
            return chat_completion.choices[0].message.content.strip()
        except:
            print("BROKE: ", text)
            sleep(10)


# Load evaluation data
print("Loading evaluation data...")
labeled_data = defaultdict(list)
with open("../neg-exemplars-raw/exceptions.onlyValid.csv", "r") as full_data:
    data_reader = csv.DictReader(full_data)
    for sample in data_reader:
        labeled_data[sample["generic_new"]].append(sample["exemplar"])
print(f"{len(labeled_data)} generics loaded!")
generics = list(labeled_data.keys())

# Evaluation Loop
print("Beginning evaluation:")
tie = 0
loss = 0
win = 0
with open(f"all_answers_{'rag' if rag else 'base'}_{'yn' if yn else 'tf'}.txt", 'w') as ans_file:
    for i in tqdm(range(1000), desc="Generic evaluation process"):
        sample = generics[i]
        for ext in ["All", "Not all"]:
            prompt = ext.lower() + " " + sample
            if yn:
                prompt = "Is it true that " + prompt[:-1].lower() + "?"
            #investigate
            if rag:
                response = query_engine.query(prompt)
            else:
                response = query_baseline(prompt, yn)

            # Record answer
            ans_file.write("INDEX: " + str(i) + '\n')
            ans_file.write("BASE INPUT: " + prompt + '\n')
            ans_file.write("RESPONSE: " + '\n' + str(response) + '\n\n')

            if yn:
                process = str(response).lower()
                false_count = process.count("no") - process.count("not") - process.count("now") - process.count("noc") - process.count("nor") - process.count("non") - process.count("nou")
                true_count = str(response).lower().count("yes") - str(response).lower().count("eyes")
            else:
                false_count = str(response).lower().count("false")
                true_count = str(response).lower().count("true")
            # print(false_count)
            # print(true_count)

            if ext == "All":
                good = false_count
                bad = true_count
            elif ext == "Not all":
                good = true_count
                bad = false_count

            ans_file.write("RESULT: ")
            if good > bad:
                win += 1
                ans_file.write("WIN")
            elif bad > good:
                loss += 1
                ans_file.write("LOSS")
            else:
                tie += 1
                ans_file.write("TIE")
            ans_file.write('\n\n-------------------\n\n')

print("Wins: ", win)
print("Ties: ", tie)
print("Loss: ", loss)
[ "llama_index.set_global_service_context", "llama_index.StorageContext.from_defaults", "llama_index.load_index_from_storage" ]
[((345, 416), 'openai.OpenAI', 'OpenAI', ([], {'base_url': '"""https://api.endpoints.anyscale.com/v1"""', 'api_key': '"""KEY"""'}), "(base_url='https://api.endpoints.anyscale.com/v1', api_key='KEY')\n", (351, 416), False, 'from openai import OpenAI\n'), ((2366, 2383), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2377, 2383), False, 'from collections import defaultdict\n'), ((606, 628), 'model_context.get_anyscale_context', 'get_anyscale_context', ([], {}), '()\n', (626, 628), False, 'from model_context import get_anyscale_context\n'), ((633, 676), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (659, 676), False, 'from llama_index import StorageContext, load_index_from_storage, set_global_service_context\n'), ((772, 829), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""vector-db-all"""'}), "(persist_dir='vector-db-all')\n", (800, 829), False, 'from llama_index import StorageContext, load_index_from_storage, set_global_service_context\n'), ((842, 936), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'service_context': 'service_context', 'storage_context': 'storage_context'}), '(service_context=service_context, storage_context=\n storage_context)\n', (865, 936), False, 'from llama_index import StorageContext, load_index_from_storage, set_global_service_context\n'), ((2480, 2505), 'csv.DictReader', 'csv.DictReader', (['full_data'], {}), '(full_data)\n', (2494, 2505), False, 'import csv\n'), ((2281, 2290), 'time.sleep', 'sleep', (['(10)'], {}), '(10)\n', (2286, 2290), False, 'from time import sleep\n')]
import os
import glob
import llama_index
from llama_index.core import ServiceContext
from llama_index.llms.anthropic import Anthropic
from llama_index.core import SimpleDirectoryReader
from llama_index.core.response_synthesizers import TreeSummarize

# MODEL = "claude-3-opus-20240229"
# MODEL = "claude-3-sonnet-20240229"
MODEL = "claude-3-haiku-20240307"

DATA_DIR = "data"
SUMMARY_ROOT = "summaries"
SUMMARY_DIR = os.path.join(SUMMARY_ROOT, MODEL).replace(":", "_")
os.makedirs(SUMMARY_DIR, exist_ok=True)

def saveText(path, text):
    "Save the given text to a file at the specified path."
    with open(path, "w") as f:
        f.write(text)

def commentPaths(ticketNumber):
    "Returns a sorted list of file paths for the comments in Zendesk ticket `ticketNumber`."
    ticketDir = os.path.join(DATA_DIR, ticketNumber)
    return sorted(glob.glob(os.path.join(ticketDir, "*.txt")))

def summaryPath(ticketNumber):
    "Returns the file path for where we store the summary of Zendesk ticket `ticketNumber`."
    return os.path.join(SUMMARY_DIR, f"{ticketNumber}.txt")

def totalSizeKB(paths):
    "Returns the total size in kilobytes of the files specified by `paths`."
    return sum(os.path.getsize(path) for path in paths) / 1024

def currentTime():
    "Returns the current time in the format 'dd/mm/YYYY HH:MM:SS'."
    from datetime import datetime
    now = datetime.now()
    return now.strftime("%d/%m/%Y %H:%M:%S")

llm = Anthropic(model=MODEL, max_tokens=1024)
service_context = ServiceContext.from_defaults(llm=llm, embed_model="local")
summarizer = TreeSummarize(service_context=service_context, verbose=False)

SUMMARY_PROMPT = "The following text is a series of messages from a PaperCut support ticket. Summarise the whole conversation, including a list of particpants and who they work for, the problem or problems, the key events and date they occurred, and the current status of the ticket. Include any log lines from the messages."

def summariseTicket(ticketNumber):
    "Summarizes the Zendesk ticket with the given `ticketNumber` and returns the summary text."
    input_files = commentPaths(ticketNumber)
    reader = SimpleDirectoryReader(input_files=input_files)
    docs = reader.load_data()
    texts = [doc.text for doc in docs]
    return summarizer.get_response(SUMMARY_PROMPT, texts)

if __name__ == "__main__":
    import time

    print(f"MODEL={MODEL}")
    ticketNumbers = sorted(os.path.basename(path) for path in glob.glob(os.path.join(DATA_DIR, "*")))
    ticketNumbers.sort(key=lambda k: (totalSizeKB(commentPaths(k)), k))
    print(ticketNumbers)
    for i, ticketNumber in enumerate(ticketNumbers):
        paths = commentPaths(ticketNumber)
        print(f"{i:4}: {ticketNumber:8} {len(paths):3} comments {totalSizeKB(paths):7.3f} kb")

    t00 = time.time()
    summaries = {}
    durations = {}
    commentCounts = {}
    commentSizes = {}
    for i, ticketNumber in enumerate(ticketNumbers):
        commentCount = len(commentPaths(ticketNumber))
        commentSize = totalSizeKB(commentPaths(ticketNumber))
        print(f"{i:2}: ticketNumber={ticketNumber:8} {commentCount:3} comments {commentSize:7.3f} kb {currentTime()}", flush=True)
        if os.path.exists(summaryPath(ticketNumber)):
            print(f"Skipping ticket {ticketNumber}", flush=True)
            continue  # Skip tickets that have already been summarised.
        t0 = time.time()
        summary = summariseTicket(ticketNumber)
        duration = time.time() - t0
        description = f"{commentCount} comments {commentSize:7.3f} kb {duration:5.2f} sec summary={len(summary)}"
        print(f" {description}", flush=True)
        with open(summaryPath(ticketNumber), "w") as f:
            print(f"Summary: ticket {ticketNumber}: {description} -------------------------", file=f)
            print(summary, file=f)
        summaries[ticketNumber] = summary
        durations[ticketNumber] = duration
        commentCounts[ticketNumber] = commentCount
        commentSizes[ticketNumber] = commentSize

    duration = time.time() - t00
    print("====================^^^====================")
    print(f"Duration: {duration:.2f} seconds")
    for i, ticketNumber in enumerate(ticketNumbers):
        commentCount = commentCounts[ticketNumber]
        commentSize = totalSizeKB(commentPaths(ticketNumber))
        duration = durations[ticketNumber]
        print(f"{i:2}: {ticketNumber:8}: {commentCount:3} comments {commentSize:7.3f} kb {duration:5.2f} seconds")
[ "llama_index.llms.anthropic.Anthropic", "llama_index.core.response_synthesizers.TreeSummarize", "llama_index.core.SimpleDirectoryReader", "llama_index.core.ServiceContext.from_defaults" ]
[((470, 509), 'os.makedirs', 'os.makedirs', (['SUMMARY_DIR'], {'exist_ok': '(True)'}), '(SUMMARY_DIR, exist_ok=True)\n', (481, 509), False, 'import os\n'), ((1440, 1479), 'llama_index.llms.anthropic.Anthropic', 'Anthropic', ([], {'model': 'MODEL', 'max_tokens': '(1024)'}), '(model=MODEL, max_tokens=1024)\n', (1449, 1479), False, 'from llama_index.llms.anthropic import Anthropic\n'), ((1498, 1556), 'llama_index.core.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""'}), "(llm=llm, embed_model='local')\n", (1526, 1556), False, 'from llama_index.core import ServiceContext\n'), ((1570, 1631), 'llama_index.core.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'service_context': 'service_context', 'verbose': '(False)'}), '(service_context=service_context, verbose=False)\n', (1583, 1631), False, 'from llama_index.core.response_synthesizers import TreeSummarize\n'), ((791, 827), 'os.path.join', 'os.path.join', (['DATA_DIR', 'ticketNumber'], {}), '(DATA_DIR, ticketNumber)\n', (803, 827), False, 'import os\n'), ((1027, 1075), 'os.path.join', 'os.path.join', (['SUMMARY_DIR', 'f"""{ticketNumber}.txt"""'], {}), "(SUMMARY_DIR, f'{ticketNumber}.txt')\n", (1039, 1075), False, 'import os\n'), ((1373, 1387), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1385, 1387), False, 'from datetime import datetime\n'), ((2149, 2195), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'input_files'}), '(input_files=input_files)\n', (2170, 2195), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((2796, 2807), 'time.time', 'time.time', ([], {}), '()\n', (2805, 2807), False, 'import time\n'), ((417, 450), 'os.path.join', 'os.path.join', (['SUMMARY_ROOT', 'MODEL'], {}), '(SUMMARY_ROOT, MODEL)\n', (429, 450), False, 'import os\n'), ((3412, 3423), 'time.time', 'time.time', ([], {}), '()\n', (3421, 3423), False, 'import time\n'), ((4065, 4076), 'time.time', 'time.time', ([], {}), '()\n', (4074, 4076), False, 'import time\n'), ((856, 888), 'os.path.join', 'os.path.join', (['ticketDir', '"""*.txt"""'], {}), "(ticketDir, '*.txt')\n", (868, 888), False, 'import os\n'), ((2422, 2444), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (2438, 2444), False, 'import os\n'), ((3491, 3502), 'time.time', 'time.time', ([], {}), '()\n', (3500, 3502), False, 'import time\n'), ((1193, 1214), 'os.path.getsize', 'os.path.getsize', (['path'], {}), '(path)\n', (1208, 1214), False, 'import os\n'), ((2467, 2494), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""*"""'], {}), "(DATA_DIR, '*')\n", (2479, 2494), False, 'import os\n')]
import streamlit as st
import os
import openai
import llama_index
from llama_index.llms import OpenAI
from llama_index.indices.composability import ComposableGraph
from llama_index.storage import StorageContext
from llama_index import TreeIndex, SummaryIndex
from llama_index.indices.loading import load_graph_from_storage
from llama_index.indices.loading import load_graph_from_storage
from llama_index.storage import StorageContext
import streamlit as st
import openai

openai.api_key= st.secrets['OPENAI_API_KEY']

st.set_page_config(page_title="Chat with AAPL 23 10-Qs, powered by Munger", page_icon=":chart_with_upwards_trend:", layout="centered", initial_sidebar_state="auto", menu_items=None)
st.title("Chat with Munger :chart_with_upwards_trend: :eyeglasses:")

if "messages" not in st.session_state.keys():  # Initialize the chat messages history
    st.session_state.messages = [
        {"role": "assistant", "content": "Ask me a question about Apple's 2023 financial documents!"}
    ]

@st.cache_resource(show_spinner=False)
def load_data():
    with st.spinner(text="Loading and indexing the AAPL 2023 10-Q docs – hang tight! This should take 1-2 minutes."):
        # Create a storage context using the persist directory
        storage_context = StorageContext.from_defaults(persist_dir='./storage')
        # Load the graph from the storage context
        graph = load_graph_from_storage(storage_context, root_id="APPL-23")
        query_engine = graph.as_query_engine(child_branch_factor=1)
        return query_engine

query_engine =load_data()

if prompt := st.chat_input("Your question"):  # Prompt for user input and save to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

for message in st.session_state.messages:  # Display the prior chat messages
    with st.chat_message(message["role"]):
        st.write(message["content"])

# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = query_engine.query(prompt)
            #ipdb.set_trace()
            st.write(response.response)
            #st.code(response.get_formatted_sources())
            message = {"role": "assistant", "content": response.response}
            st.session_state.messages.append(message)  # Add response to message history
[ "llama_index.storage.StorageContext.from_defaults", "llama_index.indices.loading.load_graph_from_storage" ]
[((520, 709), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with AAPL 23 10-Qs, powered by Munger"""', 'page_icon': '""":chart_with_upwards_trend:"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title='Chat with AAPL 23 10-Qs, powered by Munger',\n page_icon=':chart_with_upwards_trend:', layout='centered',\n initial_sidebar_state='auto', menu_items=None)\n", (538, 709), True, 'import streamlit as st\n'), ((702, 770), 'streamlit.title', 'st.title', (['"""Chat with Munger :chart_with_upwards_trend: :eyeglasses:"""'], {}), "('Chat with Munger :chart_with_upwards_trend: :eyeglasses:')\n", (710, 770), True, 'import streamlit as st\n'), ((1002, 1039), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (1019, 1039), True, 'import streamlit as st\n'), ((793, 816), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (814, 816), True, 'import streamlit as st\n'), ((1593, 1623), 'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (1606, 1623), True, 'import streamlit as st\n'), ((1678, 1747), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (1710, 1747), True, 'import streamlit as st\n'), ((1066, 1183), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""Loading and indexing the AAPL 2023 10-Q docs – hang tight! This should take 1-2 minutes."""'}), "(text=\n 'Loading and indexing the AAPL 2023 10-Q docs – hang tight! This should take 1-2 minutes.'\n )\n", (1076, 1183), True, 'import streamlit as st\n'), ((1265, 1318), 'llama_index.storage.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (1293, 1318), False, 'from llama_index.storage import StorageContext\n'), ((1386, 1445), 'llama_index.indices.loading.load_graph_from_storage', 'load_graph_from_storage', (['storage_context'], {'root_id': '"""APPL-23"""'}), "(storage_context, root_id='APPL-23')\n", (1409, 1445), False, 'from llama_index.indices.loading import load_graph_from_storage\n'), ((1834, 1866), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (1849, 1866), True, 'import streamlit as st\n'), ((1876, 1904), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (1884, 1904), True, 'import streamlit as st\n'), ((2045, 2073), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2060, 2073), True, 'import streamlit as st\n'), ((2088, 2113), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (2098, 2113), True, 'import streamlit as st\n'), ((2207, 2234), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (2215, 2234), True, 'import streamlit as st\n'), ((2376, 2417), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (2408, 2417), True, 'import streamlit as st\n')]
get_ipython().run_line_magic('pip', 'install llama-index-callbacks-wandb')
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')

import os
from getpass import getpass

if os.getenv("OPENAI_API_KEY") is None:
    os.environ["OPENAI_API_KEY"] = getpass(
        "Paste your OpenAI key from:"
        " https://platform.openai.com/account/api-keys\n"
    )
assert os.getenv("OPENAI_API_KEY", "").startswith(
    "sk-"
), "This doesn't look like a valid OpenAI API key"
print("OpenAI API key configured")

from llama_index.core.callbacks import CallbackManager
from llama_index.core.callbacks import LlamaDebugHandler
from llama_index.callbacks.wandb import WandbCallbackHandler
from llama_index.core import (
    VectorStoreIndex,
    SimpleDirectoryReader,
    SimpleKeywordTableIndex,
    StorageContext,
)
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings

Settings.llm = OpenAI(model="gpt-4", temperature=0)

import llama_index.core
from llama_index.core import set_global_handler

set_global_handler("wandb", run_args={"project": "llamaindex"})
wandb_callback = llama_index.core.global_handler

llama_debug = LlamaDebugHandler(print_trace_on_end=True)

run_args = dict(
    project="llamaindex",
)
wandb_callback = WandbCallbackHandler(run_args=run_args)

Settings.callback_manager = CallbackManager([llama_debug, wandb_callback])

get_ipython().system("mkdir -p 'data/paul_graham/'")
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'data/paul_graham/paul_graham_essay.txt'")

docs = SimpleDirectoryReader("./data/paul_graham/").load_data()

index = VectorStoreIndex.from_documents(docs)

wandb_callback.persist_index(index, index_name="simple_vector_store")

from llama_index.core import load_index_from_storage

storage_context = wandb_callback.load_storage_context(
    artifact_url="ayut/llamaindex/simple_vector_store:v0"
)

index = load_index_from_storage(
    storage_context,
)

query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response, sep="\n")

wandb_callback.finish()
[ "llama_index.core.callbacks.LlamaDebugHandler", "llama_index.core.callbacks.CallbackManager", "llama_index.core.set_global_handler", "llama_index.callbacks.wandb.WandbCallbackHandler", "llama_index.core.VectorStoreIndex.from_documents", "llama_index.core.SimpleDirectoryReader", "llama_index.llms.openai.OpenAI", "llama_index.core.load_index_from_storage" ]
[((926, 962), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4"""', 'temperature': '(0)'}), "(model='gpt-4', temperature=0)\n", (932, 962), False, 'from llama_index.llms.openai import OpenAI\n'), ((1040, 1103), 'llama_index.core.set_global_handler', 'set_global_handler', (['"""wandb"""'], {'run_args': "{'project': 'llamaindex'}"}), "('wandb', run_args={'project': 'llamaindex'})\n", (1058, 1103), False, 'from llama_index.core import set_global_handler\n'), ((1170, 1212), 'llama_index.core.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {'print_trace_on_end': '(True)'}), '(print_trace_on_end=True)\n', (1187, 1212), False, 'from llama_index.core.callbacks import LlamaDebugHandler\n'), ((1277, 1316), 'llama_index.callbacks.wandb.WandbCallbackHandler', 'WandbCallbackHandler', ([], {'run_args': 'run_args'}), '(run_args=run_args)\n', (1297, 1316), False, 'from llama_index.callbacks.wandb import WandbCallbackHandler\n'), ((1346, 1392), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[llama_debug, wandb_callback]'], {}), '([llama_debug, wandb_callback])\n', (1361, 1392), False, 'from llama_index.core.callbacks import CallbackManager\n'), ((1716, 1753), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {}), '(docs)\n', (1747, 1753), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, SimpleKeywordTableIndex, StorageContext\n'), ((2008, 2048), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (2031, 2048), False, 'from llama_index.core import load_index_from_storage\n'), ((190, 217), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (199, 217), False, 'import os\n'), ((262, 357), 'getpass.getpass', 'getpass', (['"""Paste your OpenAI key from: https://platform.openai.com/account/api-keys\n"""'], {}), "(\n 'Paste your OpenAI key from: https://platform.openai.com/account/api-keys\\n'\n )\n", (269, 357), False, 'from getpass import getpass\n'), ((380, 411), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""', '""""""'], {}), "('OPENAI_API_KEY', '')\n", (389, 411), False, 'import os\n'), ((1649, 1693), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./data/paul_graham/"""'], {}), "('./data/paul_graham/')\n", (1670, 1693), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, SimpleKeywordTableIndex, StorageContext\n')]
import streamlit as st import llama_index from llama_index import StorageContext, load_index_from_storage from llama_index.query_engine import RetrieverQueryEngine from llama_index.storage.docstore import SimpleDocumentStore from llama_index.vector_stores import SimpleVectorStore from llama_index.storage.index_store import SimpleIndexStore from llama_index import KeywordTableIndex from llama_index.indices.keyword_table import SimpleKeywordTableIndex from llama_index import ResponseSynthesizer from llama_index.indices.postprocessor import SimilarityPostprocessor from llama_index.retrievers import VectorIndexRetriever from llama_index.retrievers import ListIndexRetriever from llama_index.retrievers import TreeRootRetriever from llama_index.indices.keyword_table.retrievers import KeywordTableGPTRetriever from llama_index.indices.keyword_table import GPTSimpleKeywordTableIndex from llama_index.indices.keyword_table.retrievers import KeywordTableRAKERetriever from llama_index.indices.keyword_table.retrievers import KeywordTableSimpleRetriever from llama_index import Prompt from llama_index import LLMPredictor from langchain.chat_models import ChatOpenAI from llama_index import ServiceContext def main(): st.title("Llama Index App") # Select indexes index_names = ["vector_store", "table", "tree", "list"] index_choices = st.multiselect("Select indexes", index_names) # Load indexes from storage contexts indices = [] for index_name in index_choices: storage_context = StorageContext.from_defaults( docstore=SimpleDocumentStore.from_persist_dir(persist_dir=index_name), vector_store=SimpleVectorStore.from_persist_dir(persist_dir=index_name), index_store=SimpleIndexStore.from_persist_dir(persist_dir=index_name), ) index = load_index_from_storage(storage_context) indices.append(index) # Prompt user for query query = st.text_input("Enter your query") # Query the indexes response = None for index in indices: TEMPLATE_STR = ( "We have provided context information below. \n" "---------------------\n" "{context_str}" "\n---------------------\n" "Given this information, please answer the question: {query_str}\n" ) QA_TEMPLATE = Prompt(TEMPLATE_STR) llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", streaming=True)) service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, chunk_size=1024) query_engine = index.as_query_engine( service_context=service_context, text_qa_template=QA_TEMPLATE, similarity_top_k=3, streaming=True, ) response = query_engine.query(query) st.subheader(f"Results from {index.__class__.__name__}") # Display the response if response: # formatted_sources = response.get_formatted_sources() st.text(response) print(response) if __name__ == "__main__": main() ###### working ########
[ "llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir", "llama_index.ServiceContext.from_defaults", "llama_index.storage.index_store.SimpleIndexStore.from_persist_dir", "llama_index.Prompt", "llama_index.load_index_from_storage", "llama_index.vector_stores.SimpleVectorStore.from_persist_dir" ]
[((1225, 1252), 'streamlit.title', 'st.title', (['"""Llama Index App"""'], {}), "('Llama Index App')\n", (1233, 1252), True, 'import streamlit as st\n'), ((1355, 1400), 'streamlit.multiselect', 'st.multiselect', (['"""Select indexes"""', 'index_names'], {}), "('Select indexes', index_names)\n", (1369, 1400), True, 'import streamlit as st\n'), ((1942, 1975), 'streamlit.text_input', 'st.text_input', (['"""Enter your query"""'], {}), "('Enter your query')\n", (1955, 1975), True, 'import streamlit as st\n'), ((1830, 1870), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1853, 1870), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((2331, 2351), 'llama_index.Prompt', 'Prompt', (['TEMPLATE_STR'], {}), '(TEMPLATE_STR)\n', (2337, 2351), False, 'from llama_index import Prompt\n'), ((2490, 2564), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size': '(1024)'}), '(llm_predictor=llm_predictor, chunk_size=1024)\n', (2518, 2564), False, 'from llama_index import ServiceContext\n'), ((2805, 2861), 'streamlit.subheader', 'st.subheader', (['f"""Results from {index.__class__.__name__}"""'], {}), "(f'Results from {index.__class__.__name__}')\n", (2817, 2861), True, 'import streamlit as st\n'), ((3003, 3020), 'streamlit.text', 'st.text', (['response'], {}), '(response)\n', (3010, 3020), True, 'import streamlit as st\n'), ((1574, 1634), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', ([], {'persist_dir': 'index_name'}), '(persist_dir=index_name)\n', (1610, 1634), False, 'from llama_index.storage.docstore import SimpleDocumentStore\n'), ((1661, 1719), 'llama_index.vector_stores.SimpleVectorStore.from_persist_dir', 'SimpleVectorStore.from_persist_dir', ([], {'persist_dir': 'index_name'}), '(persist_dir=index_name)\n', (1695, 1719), False, 'from llama_index.vector_stores import SimpleVectorStore\n'), ((1745, 1802), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', ([], {'persist_dir': 'index_name'}), '(persist_dir=index_name)\n', (1778, 1802), False, 'from llama_index.storage.index_store import SimpleIndexStore\n'), ((2393, 2462), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'streaming': '(True)'}), "(temperature=0, model_name='gpt-3.5-turbo', streaming=True)\n", (2403, 2462), False, 'from langchain.chat_models import ChatOpenAI\n')]
from fastapi import FastAPI from fastapi.responses import JSONResponse, StreamingResponse from pydantic import BaseModel import os.path import llama_index from llama_index import ( VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage, set_global_service_context, PromptTemplate ) from llama_index.embeddings import HuggingFaceEmbedding from fastapi.middleware.cors import CORSMiddleware import logging import sys # Set global handler for LLaMA index llama_index.set_global_handler("simple") # Initialize FastAPI app app = FastAPI() # Define directory for persisting index PERSIST_DIR = "./storage" # Initialize embedding model embed_model = HuggingFaceEmbedding(model_name="OrdalieTech/Solon-embeddings-large-0.1") # Create service context with embedding model service_context = ServiceContext.from_defaults(embed_model=embed_model) set_global_service_context(service_context) # Load or create the index if not os.path.exists(PERSIST_DIR): documents = SimpleDirectoryReader("data").load_data() index = VectorStoreIndex.from_documents(documents) index.storage_context.persist(persist_dir=PERSIST_DIR) else: storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR) index = load_index_from_storage(storage_context) # Initialize query engine from index query_engine = index.as_query_engine(streaming=True, similarity_top_k=2) # Define custom prompt template qa_prompt_tmpl_str = ( "Context information is below.\n" "---------------------\n" "{context_str}\n" "---------------------\n" "Given the context information and not prior knowledge,Some rules to follow: 1. Avoid statements like 'Based on the context, ...' or 'The context information ...' or anything along those lines. " "answer the query in french and but remember you are chatbot trained on rh questions so always put that in perspective . you are named Rhym a chatbot created by the innovation team at BMCI \n" "Query: {query_str}\n" "Answer: " ) qa_prompt_tmpl = PromptTemplate(qa_prompt_tmpl_str) # Update query engine with custom prompt template query_engine.update_prompts( {"response_synthesizer:text_qa_template": qa_prompt_tmpl} ) # Define Pydantic model for query requests class Query(BaseModel): text: str @app.get("/query") async def query_index(query: str ): try: response_stream = query_engine.query(query) async def event_stream(): for text in response_stream.response_gen: yield f"data: {text}\n\n" # Send a special message or marker to indicate the end of the stream yield "data: END_OF_STREAM\n\n" return StreamingResponse(event_stream(), media_type="text/event-stream") except Exception as e: logging.error(f"Error during query processing: {str(e)}") return JSONResponse( status_code=503, content={"message": "LLM API is currently unavailable.", "error": str(e)} ) # Add CORS middleware to allow specific origins (or use '*' for all origins) origins = [ "*", # Allow all origins ] app.add_middleware( CORSMiddleware, allow_origins=origins, allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) # ... [rest of your code] # The main function remains unchanged if __name__ == "__main__": import uvicorn uvicorn.run(app, host="0.0.0.0", port=8000)
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.ServiceContext.from_defaults", "llama_index.StorageContext.from_defaults", "llama_index.set_global_handler", "llama_index.SimpleDirectoryReader", "llama_index.embeddings.HuggingFaceEmbedding", "llama_index.load_index_from_storage", "llama_index.PromptTemplate", "llama_index.set_global_service_context" ]
[((528, 568), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (558, 568), False, 'import llama_index\n'), ((601, 610), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (608, 610), False, 'from fastapi import FastAPI\n'), ((722, 795), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': '"""OrdalieTech/Solon-embeddings-large-0.1"""'}), "(model_name='OrdalieTech/Solon-embeddings-large-0.1')\n", (742, 795), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((861, 914), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model'}), '(embed_model=embed_model)\n', (889, 914), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage, set_global_service_context, PromptTemplate\n'), ((915, 958), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (941, 958), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage, set_global_service_context, PromptTemplate\n'), ((2076, 2110), 'llama_index.PromptTemplate', 'PromptTemplate', (['qa_prompt_tmpl_str'], {}), '(qa_prompt_tmpl_str)\n', (2090, 2110), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage, set_global_service_context, PromptTemplate\n'), ((1093, 1135), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (1124, 1135), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage, set_global_service_context, PromptTemplate\n'), ((1223, 1276), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'PERSIST_DIR'}), '(persist_dir=PERSIST_DIR)\n', (1251, 1276), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage, set_global_service_context, PromptTemplate\n'), ((1289, 1329), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (1312, 1329), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage, set_global_service_context, PromptTemplate\n'), ((3437, 3480), 'uvicorn.run', 'uvicorn.run', (['app'], {'host': '"""0.0.0.0"""', 'port': '(8000)'}), "(app, host='0.0.0.0', port=8000)\n", (3448, 3480), False, 'import uvicorn\n'), ((1039, 1068), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (1060, 1068), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, StorageContext, ServiceContext, load_index_from_storage, set_global_service_context, PromptTemplate\n')]
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-tables-chain-of-table-base') get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip') get_ipython().system('unzip data.zip') import pandas as pd df = pd.read_csv("./WikiTableQuestions/csv/200-csv/3.csv") df from llama_index.packs.tables.chain_of_table.base import ( ChainOfTableQueryEngine, serialize_table, ) from llama_index.core.llama_pack import download_llama_pack download_llama_pack( "ChainOfTablePack", "./chain_of_table_pack", skip_load=True, ) from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-4-1106-preview") import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") import pandas as pd df = pd.read_csv("~/Downloads/WikiTableQuestions/csv/200-csv/11.csv") df query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True) response = query_engine.query("Who won best Director in the 1972 Academy Awards?") str(response.response) import pandas as pd df = pd.read_csv("./WikiTableQuestions/csv/200-csv/42.csv") df query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True) response = query_engine.query("What was the precipitation in inches during June?") str(response) from llama_index.core import PromptTemplate from llama_index.core.query_pipeline import QueryPipeline prompt_str = """\ Here's a serialized table. {serialized_table} Given this table please answer the question: {question} Answer: """ prompt = PromptTemplate(prompt_str) prompt_c = prompt.as_query_component(partial={"serialized_table": serialize_table(df)}) qp = QueryPipeline(chain=[prompt_c, llm]) response = qp.run("What was the precipitation in inches during June?") print(str(response)) import pandas as pd df = pd.read_csv("./WikiTableQuestions/csv/203-csv/114.csv") df query_engine = ChainOfTableQueryEngine(df, llm=llm, verbose=True) response = query_engine.query("Which televised ABC game had the greatest attendance?") print(str(response)) from llama_index.core import PromptTemplate from llama_index.core.query_pipeline import QueryPipeline prompt_str = """\ Here's a serialized table. {serialized_table} Given this table please answer the question: {question} Answer: """ prompt = PromptTemplate(prompt_str) prompt_c = prompt.as_query_component(partial={"serialized_table": serialize_table(df)}) qp = QueryPipeline(chain=[prompt_c, llm]) response = qp.run("Which televised ABC game had the greatest attendance?") print(str(response))
[ "llama_index.packs.tables.chain_of_table.base.ChainOfTableQueryEngine", "llama_index.core.PromptTemplate", "llama_index.packs.tables.chain_of_table.base.serialize_table", "llama_index.core.query_pipeline.QueryPipeline", "llama_index.core.llama_pack.download_llama_pack", "llama_index.llms.openai.OpenAI" ]
[((389, 442), 'pandas.read_csv', 'pd.read_csv', (['"""./WikiTableQuestions/csv/200-csv/3.csv"""'], {}), "('./WikiTableQuestions/csv/200-csv/3.csv')\n", (400, 442), True, 'import pandas as pd\n'), ((622, 707), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""ChainOfTablePack"""', '"""./chain_of_table_pack"""'], {'skip_load': '(True)'}), "('ChainOfTablePack', './chain_of_table_pack', skip_load=True\n )\n", (641, 707), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((770, 804), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-preview"""'}), "(model='gpt-4-1106-preview')\n", (776, 804), False, 'from llama_index.llms.openai import OpenAI\n'), ((854, 869), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (867, 869), True, 'import phoenix as px\n'), ((953, 1017), 'pandas.read_csv', 'pd.read_csv', (['"""~/Downloads/WikiTableQuestions/csv/200-csv/11.csv"""'], {}), "('~/Downloads/WikiTableQuestions/csv/200-csv/11.csv')\n", (964, 1017), True, 'import pandas as pd\n'), ((1040, 1090), 'llama_index.packs.tables.chain_of_table.base.ChainOfTableQueryEngine', 'ChainOfTableQueryEngine', (['df'], {'llm': 'llm', 'verbose': '(True)'}), '(df, llm=llm, verbose=True)\n', (1063, 1090), False, 'from llama_index.packs.tables.chain_of_table.base import ChainOfTableQueryEngine, serialize_table\n'), ((1230, 1284), 'pandas.read_csv', 'pd.read_csv', (['"""./WikiTableQuestions/csv/200-csv/42.csv"""'], {}), "('./WikiTableQuestions/csv/200-csv/42.csv')\n", (1241, 1284), True, 'import pandas as pd\n'), ((1307, 1357), 'llama_index.packs.tables.chain_of_table.base.ChainOfTableQueryEngine', 'ChainOfTableQueryEngine', (['df'], {'llm': 'llm', 'verbose': '(True)'}), '(df, llm=llm, verbose=True)\n', (1330, 1357), False, 'from llama_index.packs.tables.chain_of_table.base import ChainOfTableQueryEngine, serialize_table\n'), ((1708, 1734), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str'], {}), '(prompt_str)\n', (1722, 1734), False, 'from llama_index.core import PromptTemplate\n'), ((1831, 1867), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[prompt_c, llm]'}), '(chain=[prompt_c, llm])\n', (1844, 1867), False, 'from llama_index.core.query_pipeline import QueryPipeline\n'), ((1989, 2044), 'pandas.read_csv', 'pd.read_csv', (['"""./WikiTableQuestions/csv/203-csv/114.csv"""'], {}), "('./WikiTableQuestions/csv/203-csv/114.csv')\n", (2000, 2044), True, 'import pandas as pd\n'), ((2065, 2115), 'llama_index.packs.tables.chain_of_table.base.ChainOfTableQueryEngine', 'ChainOfTableQueryEngine', (['df'], {'llm': 'llm', 'verbose': '(True)'}), '(df, llm=llm, verbose=True)\n', (2088, 2115), False, 'from llama_index.packs.tables.chain_of_table.base import ChainOfTableQueryEngine, serialize_table\n'), ((2475, 2501), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str'], {}), '(prompt_str)\n', (2489, 2501), False, 'from llama_index.core import PromptTemplate\n'), ((2595, 2631), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[prompt_c, llm]'}), '(chain=[prompt_c, llm])\n', (2608, 2631), False, 'from llama_index.core.query_pipeline import QueryPipeline\n'), ((1801, 1820), 'llama_index.packs.tables.chain_of_table.base.serialize_table', 'serialize_table', (['df'], {}), '(df)\n', (1816, 1820), False, 'from llama_index.packs.tables.chain_of_table.base import ChainOfTableQueryEngine, serialize_table\n'), ((2568, 2587), 
'llama_index.packs.tables.chain_of_table.base.serialize_table', 'serialize_table', (['df'], {}), '(df)\n', (2583, 2587), False, 'from llama_index.packs.tables.chain_of_table.base import ChainOfTableQueryEngine, serialize_table\n')]
import gradio as gr import os from datetime import datetime import logging import sys from llama_index import SimpleDirectoryReader import llama_index.readers.file.base import glob import numpy as np import soundfile as sf import shutil import openai import json import cv2 from llama_index import download_loader ImageCaptionReader = download_loader('ImageCaptionReader') openai.api_key = os.environ['OPENAI_API_KEY'] logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) # print('done processing import') with open('config.json', encoding='utf8') as f: config = json.load(f) def process_inputs(text: str, image: np.ndarray, video: str, audio: tuple, ): output = "" # # print('video', type(video), video) # # print('text', type(text), text) # # print('audio', type(audio), audio) # # print('image', type(image), image) if not text and image is not None and not video and audio is not None: return "Please upload at least one of the following: text, image, video, audio." timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") # Create a folder named 'media_files' if it doesn't exist os.makedirs(f"media_files/{timestamp}", exist_ok=True) if video: video_path = os.path.join("media_files", f"{timestamp}/video.mp4") # copy from "video" to "video_path" shutil.copyfile(video, video_path) # os.rename(video_path, video_path) ffmpeg_cmd = f'ffmpeg -i {video_path} -vf "select=not(mod(n\,100))" -vsync vfr media_files/{timestamp}/frame_%03d.jpg' os.system(ffmpeg_cmd) output += "Video processed and saved.\n" print("Video processed and saved.") # gr.Interface.update("Video saved.") if text: text_path = os.path.join("media_files", f"{timestamp}/text.txt") with open(text_path, "w", encoding='utf8') as f: f.write(text) output += "Text processed and saved: " + text + "\n" # print("Text processed and saved: " + text + "") # gr.Interface.update("Text processed and saved: " + "") if audio is not None: sr, audio = audio audio_path = os.path.join("media_files", f"{timestamp}/audio.mp3") sf.write(audio_path, audio, sr) output += "Audio processed and saved.\n" print("Audio processed and saved.") # gr.Interface.update("Audio saved.") if image is not None: image_path = os.path.join("media_files", f"{timestamp}/image.png") image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) cv2.imwrite(image_path, image) output += "Image processed and saved.\n" print("Image processed and saved.") # gr.Interface.update("Image saved.") root = f"media_files/{timestamp}" image_caption_prompt = "Question: Describe what you see in this image and if there are any dangers or emergencies there any dangers and how sever they are. Answer:" text_files = glob.glob(f'{root}/*.txt') text_content = '' if text_files: # print('processing text_files ...') text_content = SimpleDirectoryReader( input_files=text_files, file_extractor={ ".jpg": ImageCaptionReader(), ".png": ImageCaptionReader(), ".jpeg": ImageCaptionReader(), ".wav": llama_index.readers.file.video_audio_reader, ".mp4": llama_index.readers.file.video_audio_reader, } ).load_data() texts = [x.text for x in text_content] text = '\n\n'.join(texts) text_content = text.replace('"', "'").replace('\n', '. 
') # print('done processing text_files') image_files = glob.glob(f'{root}/*.png') + glob.glob(f'{root}/*.jpg') image_content = '' if image_files: # print('processing image_files ...') image_content = SimpleDirectoryReader( input_files=image_files, file_extractor={ ".jpg": ImageCaptionReader(), ".png": ImageCaptionReader(), ".jpeg": ImageCaptionReader(), ".wav": llama_index.readers.file.video_audio_reader, ".mp4": llama_index.readers.file.video_audio_reader, } ).load_data() texts = [x.text for x in image_content] text = '\n\n'.join(texts) image_content = text.replace('"', "'").replace('\n', '. ') # print('done processing image_files') audio_files = glob.glob(f'{root}/*.mp3') audio_content = '' if audio_files: # print('processing audio_files ...') audio_content = SimpleDirectoryReader( input_files=audio_files, file_extractor={ ".jpg": ImageCaptionReader(), ".png": ImageCaptionReader(), ".jpeg": ImageCaptionReader(), ".mp3": llama_index.readers.file.video_audio_reader, ".mp4": llama_index.readers.file.video_audio_reader, } ).load_data() texts = [x.text for x in audio_content] text = '\n\n'.join(texts) audio_content = text.replace('"', "'").replace('\n', '. ') # print('done processing audio_files') video_files = glob.glob(f'{root}/*.mp4') video_content = '' if video_files: # print('processing video_files ...') video_content = SimpleDirectoryReader( input_files=video_files, file_extractor={ ".jpg": ImageCaptionReader(), ".png": ImageCaptionReader(), ".jpeg": ImageCaptionReader(), ".mp3": llama_index.readers.file.video_audio_reader, ".mp4": llama_index.readers.file.video_audio_reader, } ).load_data() texts = [x.text for x in video_content] text = '\n\n'.join(texts) video_content = text.replace('"', "'").replace('\n', '. ') # print('done processing video_files') ar2en = {v:k for (k,v) in config["en2ar"].items()} emergencies_en = [ar2en[k] for k in config['redirects']] system_prompt = f"""I want you to act as a 911 operator that understands Arabic. I will give you text and audio transcripts that the users upload in an emergency, and I need you to classify the different types of emergencies. The incoming information could be Arabic or English, and you must output only in English. The different types of emergencies are only one of {len(emergencies_en)}: {json.dumps(emergencies_en)} I will give you the information provided by the user bellow, and you should classify from the {len(emergencies_en)} types of emergencies. """ prompt = """ === User information for emergency """ if text_content: prompt += f'User text: "{text_content}"\n' if image_content: prompt += f'User uploaded an image of: "{image_content}"\n' if audio_content: prompt += f'User uploaded an audio, the text in that audio sounds like: "{audio_content} {video_content}" \n' prompt += """ === End of user information for emergency Now you must output only in JSON in the following format: {"emergency_class": string, "explaination_arabic": string} Note that "explaination_arabic" must be in Arabic. 
For the emergency_class, you must choose one of the following: """ + json.dumps(emergencies_en) # print('prompt', prompt) completion = openai.ChatCompletion.create( model="gpt-4", messages=[ {"role": "system", "content": system_prompt}, {"role": "user", "content": prompt}, ] ) # parse model JSON output content = completion.choices[0].message.content content = content.replace(",}", "}") # just in case # start from first "{" until the first "}" content = content[content.find("{") : content.find("}")+1] # print('ChatGPT response:', content) try: result = json.loads(content) except: result = { "emergency_class": "unknown", "explaination_arabic": "Could not parse output.: " + content } emergency_class_ar = config['en2ar'].get(result['emergency_class'], "غير معروف") redirects = config['redirects'].get(emergency_class_ar, ["<غير معروف>"]) output = f"""نوع الحالة: {emergency_class_ar} الجهات المسؤولة: - """ + ('\n - '.join(redirects)) + f"\n\nالشرح: {result['explaination_arabic']}" return output if output else "No input provided." video_input = gr.inputs.Video(optional=True, label="Input Video") text_input = gr.inputs.Textbox(lines=3, optional=True, label="Input Text") audio_input = gr.inputs.Audio(optional=True, label="Input Audio") image_input = gr.inputs.Image(optional=True, label="Input Image") output_text = gr.outputs.Textbox(label="Output Text") examples = [ # text_input, image_input, video_input, audio_input ["", None,"data/fire_at_gas_station.mp4", None,], ["", "data/small-car-accident.jpg", None, None], ["", "data/electrical-fire.jpg", None, None], ["", "data/major-car-accident.jpg", None, None], ["", "data/gettyimages-50908538-612x612.jpg", None, None], ["", None, None, "data/fire_at_gas_station.mp3",], ["السلام عليكم، أنا أتصل لأبلغ عن حريق كبير في مبنى سكني بشارع المنصور. يبدو أن النيران اندلعت في الطابق الثالث وتنتشر بسرورة. يرجى إرسال رجال الإطفاء فوراً", None, None, None], ["السلام عليكم، أنا أتصل لأبلغ عن حادثة تحرش حدثت لي في مترو الأنفاق بمحطة المرج. كان هناك رجل يلمسني بشكل غير لائق ويحاول مضايقتي. يرجى إرسال دورية أمنية للموقع فوراً", None, None, None], ["السلام عليكم، أنا أتصل لأبلغ عن سرقة تعرضت لها قبل قليل. شخصان قاما بسلب هاتفي الجوال ومحفظتي تحت تهديد السلاح. حدث ذلك في حي النزهة بالقرب من متجر السوبر ماركت. أرجو إرسال دورية أمنية وفتح تحقيق في الواقعة", None, None, None], ] iface = gr.Interface( fn=process_inputs, inputs=[text_input, image_input, video_input, audio_input], outputs=output_text, title="<img src='https://i.imgur.com/Qakrqvn.png' width='100' height='100'> منصة استجابة", description="تحديد نوع المخاطر والحالات الطارئة تلقائيا باستخدام الذكاء الاصطناعي,\nشبيها بتطبيق 'كلنا امن' بامكانك رفع نص او صور اومقطع او صوت وسيتم تحديد نوع الحالة والجهات المسؤولة عنها", examples=examples, cache_examples=True, ) # image = gr.Image("logo.png", style=(100, 100)) # iface.add(image) # "text-align: right;" # print('http://127.0.0.1:7860/?__theme=light') iface.launch( share=True, favicon_path='logo.png' )
[ "llama_index.download_loader" ]
[((337, 374), 'llama_index.download_loader', 'download_loader', (['"""ImageCaptionReader"""'], {}), "('ImageCaptionReader')\n", (352, 374), False, 'from llama_index import download_loader\n'), ((423, 481), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (442, 481), False, 'import logging\n'), ((8563, 8614), 'gradio.inputs.Video', 'gr.inputs.Video', ([], {'optional': '(True)', 'label': '"""Input Video"""'}), "(optional=True, label='Input Video')\n", (8578, 8614), True, 'import gradio as gr\n'), ((8628, 8689), 'gradio.inputs.Textbox', 'gr.inputs.Textbox', ([], {'lines': '(3)', 'optional': '(True)', 'label': '"""Input Text"""'}), "(lines=3, optional=True, label='Input Text')\n", (8645, 8689), True, 'import gradio as gr\n'), ((8704, 8755), 'gradio.inputs.Audio', 'gr.inputs.Audio', ([], {'optional': '(True)', 'label': '"""Input Audio"""'}), "(optional=True, label='Input Audio')\n", (8719, 8755), True, 'import gradio as gr\n'), ((8770, 8821), 'gradio.inputs.Image', 'gr.inputs.Image', ([], {'optional': '(True)', 'label': '"""Input Image"""'}), "(optional=True, label='Input Image')\n", (8785, 8821), True, 'import gradio as gr\n'), ((8837, 8876), 'gradio.outputs.Textbox', 'gr.outputs.Textbox', ([], {'label': '"""Output Text"""'}), "(label='Output Text')\n", (8855, 8876), True, 'import gradio as gr\n'), ((9894, 10355), 'gradio.Interface', 'gr.Interface', ([], {'fn': 'process_inputs', 'inputs': '[text_input, image_input, video_input, audio_input]', 'outputs': 'output_text', 'title': '"""<img src=\'https://i.imgur.com/Qakrqvn.png\' width=\'100\' height=\'100\'> منصة استجابة"""', 'description': '"""تحديد نوع المخاطر والحالات الطارئة تلقائيا باستخدام الذكاء الاصطناعي,\nشبيها بتطبيق \'كلنا امن\' بامكانك رفع نص او صور اومقطع او صوت وسيتم تحديد نوع الحالة والجهات المسؤولة عنها"""', 'examples': 'examples', 'cache_examples': '(True)'}), '(fn=process_inputs, inputs=[text_input, image_input,\n video_input, audio_input], outputs=output_text, title=\n "<img src=\'https://i.imgur.com/Qakrqvn.png\' width=\'100\' height=\'100\'> منصة استجابة"\n , description=\n """تحديد نوع المخاطر والحالات الطارئة تلقائيا باستخدام الذكاء الاصطناعي,\nشبيها بتطبيق \'كلنا امن\' بامكانك رفع نص او صور اومقطع او صوت وسيتم تحديد نوع الحالة والجهات المسؤولة عنها"""\n , examples=examples, cache_examples=True)\n', (9906, 10355), True, 'import gradio as gr\n'), ((513, 553), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (534, 553), False, 'import logging\n'), ((651, 663), 'json.load', 'json.load', (['f'], {}), '(f)\n', (660, 663), False, 'import json\n'), ((1216, 1270), 'os.makedirs', 'os.makedirs', (['f"""media_files/{timestamp}"""'], {'exist_ok': '(True)'}), "(f'media_files/{timestamp}', exist_ok=True)\n", (1227, 1270), False, 'import os\n'), ((3028, 3054), 'glob.glob', 'glob.glob', (['f"""{root}/*.txt"""'], {}), "(f'{root}/*.txt')\n", (3037, 3054), False, 'import glob\n'), ((4565, 4591), 'glob.glob', 'glob.glob', (['f"""{root}/*.mp3"""'], {}), "(f'{root}/*.mp3')\n", (4574, 4591), False, 'import glob\n'), ((5323, 5349), 'glob.glob', 'glob.glob', (['f"""{root}/*.mp4"""'], {}), "(f'{root}/*.mp4')\n", (5332, 5349), False, 'import glob\n'), ((7486, 7627), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': '"""gpt-4"""', 'messages': "[{'role': 'system', 'content': system_prompt}, {'role': 'user', 'content':\n prompt}]"}), "(model='gpt-4', messages=[{'role': 
'system',\n 'content': system_prompt}, {'role': 'user', 'content': prompt}])\n", (7514, 7627), False, 'import openai\n'), ((482, 501), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (499, 501), False, 'import logging\n'), ((1308, 1361), 'os.path.join', 'os.path.join', (['"""media_files"""', 'f"""{timestamp}/video.mp4"""'], {}), "('media_files', f'{timestamp}/video.mp4')\n", (1320, 1361), False, 'import os\n'), ((1415, 1449), 'shutil.copyfile', 'shutil.copyfile', (['video', 'video_path'], {}), '(video, video_path)\n', (1430, 1449), False, 'import shutil\n'), ((1638, 1659), 'os.system', 'os.system', (['ffmpeg_cmd'], {}), '(ffmpeg_cmd)\n', (1647, 1659), False, 'import os\n'), ((1834, 1886), 'os.path.join', 'os.path.join', (['"""media_files"""', 'f"""{timestamp}/text.txt"""'], {}), "('media_files', f'{timestamp}/text.txt')\n", (1846, 1886), False, 'import os\n'), ((2228, 2281), 'os.path.join', 'os.path.join', (['"""media_files"""', 'f"""{timestamp}/audio.mp3"""'], {}), "('media_files', f'{timestamp}/audio.mp3')\n", (2240, 2281), False, 'import os\n'), ((2290, 2321), 'soundfile.write', 'sf.write', (['audio_path', 'audio', 'sr'], {}), '(audio_path, audio, sr)\n', (2298, 2321), True, 'import soundfile as sf\n'), ((2509, 2562), 'os.path.join', 'os.path.join', (['"""media_files"""', 'f"""{timestamp}/image.png"""'], {}), "('media_files', f'{timestamp}/image.png')\n", (2521, 2562), False, 'import os\n'), ((2579, 2617), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR'], {}), '(image, cv2.COLOR_RGB2BGR)\n', (2591, 2617), False, 'import cv2\n'), ((2626, 2656), 'cv2.imwrite', 'cv2.imwrite', (['image_path', 'image'], {}), '(image_path, image)\n', (2637, 2656), False, 'import cv2\n'), ((3778, 3804), 'glob.glob', 'glob.glob', (['f"""{root}/*.png"""'], {}), "(f'{root}/*.png')\n", (3787, 3804), False, 'import glob\n'), ((3807, 3833), 'glob.glob', 'glob.glob', (['f"""{root}/*.jpg"""'], {}), "(f'{root}/*.jpg')\n", (3816, 3833), False, 'import glob\n'), ((7410, 7436), 'json.dumps', 'json.dumps', (['emergencies_en'], {}), '(emergencies_en)\n', (7420, 7436), False, 'import json\n'), ((7998, 8017), 'json.loads', 'json.loads', (['content'], {}), '(content)\n', (8008, 8017), False, 'import json\n'), ((1109, 1123), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1121, 1123), False, 'from datetime import datetime\n'), ((6598, 6624), 'json.dumps', 'json.dumps', (['emergencies_en'], {}), '(emergencies_en)\n', (6608, 6624), False, 'import json\n')]
"""Download.""" import json import logging import os import subprocess import sys from enum import Enum from importlib import util from pathlib import Path from typing import Any, Dict, List, Optional, Union import pkg_resources import requests from pkg_resources import DistributionNotFound from llama_index.download.utils import ( get_exports, get_file_content, initialize_directory, rewrite_exports, ) LLAMA_HUB_CONTENTS_URL = f"https://raw.githubusercontent.com/run-llama/llama-hub/main" LLAMA_HUB_PATH = "/llama_hub" LLAMA_HUB_URL = LLAMA_HUB_CONTENTS_URL + LLAMA_HUB_PATH PATH_TYPE = Union[str, Path] logger = logging.getLogger(__name__) LLAMAHUB_ANALYTICS_PROXY_SERVER = "https://llamahub.ai/api/analytics/downloads" class MODULE_TYPE(str, Enum): LOADER = "loader" TOOL = "tool" LLAMAPACK = "llamapack" DATASETS = "datasets" def get_module_info( local_dir_path: PATH_TYPE, remote_dir_path: PATH_TYPE, module_class: str, refresh_cache: bool = False, library_path: str = "library.json", disable_library_cache: bool = False, ) -> Dict: """Get module info.""" if isinstance(local_dir_path, str): local_dir_path = Path(local_dir_path) local_library_path = f"{local_dir_path}/{library_path}" module_id = None # e.g. `web/simple_web` extra_files = [] # e.g. `web/simple_web/utils.py` # Check cache first if not refresh_cache and os.path.exists(local_library_path): with open(local_library_path) as f: library = json.load(f) if module_class in library: module_id = library[module_class]["id"] extra_files = library[module_class].get("extra_files", []) # Fetch up-to-date library from remote repo if module_id not found if module_id is None: library_raw_content, _ = get_file_content( str(remote_dir_path), f"/{library_path}" ) library = json.loads(library_raw_content) if module_class not in library: raise ValueError("Loader class name not found in library") module_id = library[module_class]["id"] extra_files = library[module_class].get("extra_files", []) # create cache dir if needed local_library_dir = os.path.dirname(local_library_path) if not disable_library_cache: if not os.path.exists(local_library_dir): os.makedirs(local_library_dir) # Update cache with open(local_library_path, "w") as f: f.write(library_raw_content) if module_id is None: raise ValueError("Loader class name not found in library") return { "module_id": module_id, "extra_files": extra_files, } def download_module_and_reqs( local_dir_path: PATH_TYPE, remote_dir_path: PATH_TYPE, module_id: str, extra_files: List[str], refresh_cache: bool = False, use_gpt_index_import: bool = False, base_file_name: str = "base.py", override_path: bool = False, ) -> None: """Load module.""" if isinstance(local_dir_path, str): local_dir_path = Path(local_dir_path) if override_path: module_path = str(local_dir_path) else: module_path = f"{local_dir_path}/{module_id}" if refresh_cache or not os.path.exists(module_path): os.makedirs(module_path, exist_ok=True) basepy_raw_content, _ = get_file_content( str(remote_dir_path), f"/{module_id}/{base_file_name}" ) if use_gpt_index_import: basepy_raw_content = basepy_raw_content.replace( "import llama_index", "import llama_index" ) basepy_raw_content = basepy_raw_content.replace( "from llama_index", "from llama_index" ) with open(f"{module_path}/{base_file_name}", "w") as f: f.write(basepy_raw_content) # Get content of extra files if there are any # and write them under the loader directory for extra_file in extra_files: extra_file_raw_content, _ = get_file_content( str(remote_dir_path), f"/{module_id}/{extra_file}" ) # If the extra 
file is an __init__.py file, we need to # add the exports to the __init__.py file in the modules directory if extra_file == "__init__.py": loader_exports = get_exports(extra_file_raw_content) existing_exports = [] init_file_path = local_dir_path / "__init__.py" # if the __init__.py file do not exists, we need to create it mode = "a+" if not os.path.exists(init_file_path) else "r+" with open(init_file_path, mode) as f: f.write(f"from .{module_id} import {', '.join(loader_exports)}") existing_exports = get_exports(f.read()) rewrite_exports(existing_exports + loader_exports, str(local_dir_path)) with open(f"{module_path}/{extra_file}", "w") as f: f.write(extra_file_raw_content) # install requirements requirements_path = f"{local_dir_path}/requirements.txt" if not os.path.exists(requirements_path): # NOTE: need to check the status code response_txt, status_code = get_file_content( str(remote_dir_path), f"/{module_id}/requirements.txt" ) if status_code == 200: with open(requirements_path, "w") as f: f.write(response_txt) # Install dependencies if there are any and not already installed if os.path.exists(requirements_path): try: requirements = pkg_resources.parse_requirements( Path(requirements_path).open() ) pkg_resources.require([str(r) for r in requirements]) except DistributionNotFound: subprocess.check_call( [sys.executable, "-m", "pip", "install", "-r", requirements_path] ) def download_llama_module( module_class: str, llama_hub_url: str = LLAMA_HUB_URL, refresh_cache: bool = False, custom_dir: Optional[str] = None, custom_path: Optional[str] = None, library_path: str = "library.json", base_file_name: str = "base.py", use_gpt_index_import: bool = False, disable_library_cache: bool = False, override_path: bool = False, skip_load: bool = False, ) -> Any: """Download a module from LlamaHub. Can be a loader, tool, pack, or more. Args: loader_class: The name of the llama module class you want to download, such as `GmailOpenAIAgentPack`. refresh_cache: If true, the local cache will be skipped and the loader will be fetched directly from the remote repo. custom_dir: Custom dir name to download loader into (under parent folder). custom_path: Custom dirpath to download loader into. library_path: File name of the library file. use_gpt_index_import: If true, the loader files will use llama_index as the base dependency. By default (False), the loader files use llama_index as the base dependency. NOTE: this is a temporary workaround while we fully migrate all usages to llama_index. 
is_dataset: whether or not downloading a LlamaDataset Returns: A Loader, A Pack, An Agent, or A Dataset """ # create directory / get path dirpath = initialize_directory(custom_path=custom_path, custom_dir=custom_dir) # fetch info from library.json file module_info = get_module_info( local_dir_path=dirpath, remote_dir_path=llama_hub_url, module_class=module_class, refresh_cache=refresh_cache, library_path=library_path, disable_library_cache=disable_library_cache, ) module_id = module_info["module_id"] extra_files = module_info["extra_files"] # download the module, install requirements download_module_and_reqs( local_dir_path=dirpath, remote_dir_path=llama_hub_url, module_id=module_id, extra_files=extra_files, refresh_cache=refresh_cache, use_gpt_index_import=use_gpt_index_import, base_file_name=base_file_name, override_path=override_path, ) if skip_load: return None # loads the module into memory if override_path: path = f"{dirpath}/{base_file_name}" spec = util.spec_from_file_location("custom_module", location=path) if spec is None: raise ValueError(f"Could not find file: {path}.") else: path = f"{dirpath}/{module_id}/{base_file_name}" spec = util.spec_from_file_location("custom_module", location=path) if spec is None: raise ValueError(f"Could not find file: {path}.") module = util.module_from_spec(spec) spec.loader.exec_module(module) # type: ignore return getattr(module, module_class) def track_download(module_class: str, module_type: str) -> None: """Tracks number of downloads via Llamahub proxy. Args: module_class: The name of the llama module being downloaded, e.g.,`GmailOpenAIAgentPack`. module_type: Can be "loader", "tool", "llamapack", or "datasets" """ try: requests.post( LLAMAHUB_ANALYTICS_PROXY_SERVER, json={"type": module_type, "plugin": module_class}, ) except Exception as e: logger.info(f"Error tracking downloads for {module_class} : {e}")
[ "llama_index.download.utils.get_exports", "llama_index.download.utils.initialize_directory" ]
[((637, 664), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (654, 664), False, 'import logging\n'), ((5550, 5583), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5564, 5583), False, 'import os\n'), ((7432, 7500), 'llama_index.download.utils.initialize_directory', 'initialize_directory', ([], {'custom_path': 'custom_path', 'custom_dir': 'custom_dir'}), '(custom_path=custom_path, custom_dir=custom_dir)\n', (7452, 7500), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((8830, 8857), 'importlib.util.module_from_spec', 'util.module_from_spec', (['spec'], {}), '(spec)\n', (8851, 8857), False, 'from importlib import util\n'), ((1197, 1217), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (1201, 1217), False, 'from pathlib import Path\n'), ((1434, 1468), 'os.path.exists', 'os.path.exists', (['local_library_path'], {}), '(local_library_path)\n', (1448, 1468), False, 'import os\n'), ((1938, 1969), 'json.loads', 'json.loads', (['library_raw_content'], {}), '(library_raw_content)\n', (1948, 1969), False, 'import json\n'), ((2263, 2298), 'os.path.dirname', 'os.path.dirname', (['local_library_path'], {}), '(local_library_path)\n', (2278, 2298), False, 'import os\n'), ((3131, 3151), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (3135, 3151), False, 'from pathlib import Path\n'), ((3347, 3386), 'os.makedirs', 'os.makedirs', (['module_path'], {'exist_ok': '(True)'}), '(module_path, exist_ok=True)\n', (3358, 3386), False, 'import os\n'), ((5139, 5172), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5153, 5172), False, 'import os\n'), ((8438, 8498), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'path'}), "('custom_module', location=path)\n", (8466, 8498), False, 'from importlib import util\n'), ((8668, 8728), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'path'}), "('custom_module', location=path)\n", (8696, 8728), False, 'from importlib import util\n'), ((9280, 9382), 'requests.post', 'requests.post', (['LLAMAHUB_ANALYTICS_PROXY_SERVER'], {'json': "{'type': module_type, 'plugin': module_class}"}), "(LLAMAHUB_ANALYTICS_PROXY_SERVER, json={'type': module_type,\n 'plugin': module_class})\n", (9293, 9382), False, 'import requests\n'), ((1536, 1548), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1545, 1548), False, 'import json\n'), ((3310, 3337), 'os.path.exists', 'os.path.exists', (['module_path'], {}), '(module_path)\n', (3324, 3337), False, 'import os\n'), ((4385, 4420), 'llama_index.download.utils.get_exports', 'get_exports', (['extra_file_raw_content'], {}), '(extra_file_raw_content)\n', (4396, 4420), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((2356, 2389), 'os.path.exists', 'os.path.exists', (['local_library_dir'], {}), '(local_library_dir)\n', (2370, 2389), False, 'import os\n'), ((2407, 2437), 'os.makedirs', 'os.makedirs', (['local_library_dir'], {}), '(local_library_dir)\n', (2418, 2437), False, 'import os\n'), ((5835, 5927), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 'install', '-r', requirements_path]"], {}), "([sys.executable, '-m', 'pip', 'install', '-r',\n requirements_path])\n", (5856, 5927), False, 'import 
subprocess\n'), ((4620, 4650), 'os.path.exists', 'os.path.exists', (['init_file_path'], {}), '(init_file_path)\n', (4634, 4650), False, 'import os\n'), ((5675, 5698), 'pathlib.Path', 'Path', (['requirements_path'], {}), '(requirements_path)\n', (5679, 5698), False, 'from pathlib import Path\n')]
import os import openai import logging import sys import llama_index from llama_index import ( VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, ServiceContext, ) from llama_index.llms import OpenAI import chromadb from llama_index.vector_stores import ChromaVectorStore from llama_index.embeddings import OpenAIEmbedding from trulens_eval import Tru from llama_index.query_engine import CitationQueryEngine import json openai.api_key = os.environ["OPENAI_API_KEY"] CUSTOM_QUERY = "First greet yourself and Send me a summary of the file. In your summary, make sure to mention the file location and the data name, also to have 10 bullet points. Each bullet point should be on a new row. Try to incorporate few key points from all the text. Do it step by step:" list_of_indices = [] tru = Tru() tru.reset_database() logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) def create_index(directory, unique_folder_id): llm = OpenAI(temperature=0.1, model="gpt-4-vision-preview", max_tokens=512) prompt_helper = PromptHelper( context_window=4096, num_output=256, chunk_overlap_ratio=0.1, chunk_size_limit=None, ) service_context = ServiceContext.from_defaults(llm=llm, prompt_helper=prompt_helper) documents = SimpleDirectoryReader(input_dir=directory).load_data() index = VectorStoreIndex.from_documents(documents, service_context=service_context) index.set_index_id(create_dynamic_vector_ids(unique_folder_id)) index.storage_context.persist(create_dynamic_storage_contexts(unique_folder_id)) a = index.index_struct_cls # Chroma vector store for easy indexing and retrieval db = chromadb.PersistentClient(path="./chroma_db") chroma_collection = db.get_or_create_collection("investment_ai") chroma_vector_store = ChromaVectorStore(chroma_collection=chroma_collection) chroma_storage_context = StorageContext.from_defaults( vector_store=chroma_vector_store ) chroma_index = VectorStoreIndex.from_documents( documents, storage_context=chroma_storage_context, service_context=service_context, ) print(chroma_index.storage_context.graph_store.get) return index def auto_summarization(unique_folder_id): dynamic_storage_context = create_dynamic_storage_contexts(unique_folder_id) dynamic_vector_id = create_dynamic_vector_ids(unique_folder_id) storage_context = StorageContext.from_defaults(persist_dir=dynamic_storage_context) # load index index = load_index_from_storage(storage_context, index_id=dynamic_vector_id) query_engine = index.as_query_engine(response_mode="compact", verbose=True) response = query_engine.query(CUSTOM_QUERY) return str(response.response) return str(response.response) def ask_question(query, unique_folder_id): dynamic_storage_context = create_dynamic_storage_contexts(unique_folder_id) dynamic_vector_id = create_dynamic_vector_ids(unique_folder_id) # rebuild storage context storage_context = StorageContext.from_defaults(persist_dir=dynamic_storage_context) # load index index = llama_index.indices.loading.load_index_from_storage( storage_context, index_id=dynamic_vector_id ) query_engine = CitationQueryEngine.from_args( index, similarity_top_k=3, citation_chunk_size=512, streaming=True ) response_stream = query_engine.query( "When a question is asked always and if it is a greeting please answer accordingly.If question is not about given data, say you only answer about given data. 
If the question is about the given data please elaborate more on details and answer human-like according to this question: "
        + query
    )
    return response_stream


def create_dynamic_storage_contexts(unique_folder_id):
    return "./storage_" + str(unique_folder_id)


def create_dynamic_vector_ids(unique_folder_id):
    return "vector_index_" + str(unique_folder_id)
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.query_engine.CitationQueryEngine.from_args", "llama_index.ServiceContext.from_defaults", "llama_index.vector_stores.ChromaVectorStore", "llama_index.StorageContext.from_defaults", "llama_index.SimpleDirectoryReader", "llama_index.llms.OpenAI", "llama_index.indices.loading.load_index_from_storage", "llama_index.load_index_from_storage", "llama_index.PromptHelper" ]
[((879, 884), 'trulens_eval.Tru', 'Tru', ([], {}), '()\n', (882, 884), False, 'from trulens_eval import Tru\n'), ((907, 965), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (926, 965), False, 'import logging\n'), ((997, 1037), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (1018, 1037), False, 'import logging\n'), ((1098, 1167), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.1)', 'model': '"""gpt-4-vision-preview"""', 'max_tokens': '(512)'}), "(temperature=0.1, model='gpt-4-vision-preview', max_tokens=512)\n", (1104, 1167), False, 'from llama_index.llms import OpenAI\n'), ((1188, 1289), 'llama_index.PromptHelper', 'PromptHelper', ([], {'context_window': '(4096)', 'num_output': '(256)', 'chunk_overlap_ratio': '(0.1)', 'chunk_size_limit': 'None'}), '(context_window=4096, num_output=256, chunk_overlap_ratio=0.1,\n chunk_size_limit=None)\n', (1200, 1289), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, ServiceContext\n'), ((1348, 1414), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'prompt_helper': 'prompt_helper'}), '(llm=llm, prompt_helper=prompt_helper)\n', (1376, 1414), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, ServiceContext\n'), ((1500, 1575), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (1531, 1575), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, ServiceContext\n'), ((1828, 1873), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""./chroma_db"""'}), "(path='./chroma_db')\n", (1853, 1873), False, 'import chromadb\n'), ((1970, 2024), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'chroma_collection'}), '(chroma_collection=chroma_collection)\n', (1987, 2024), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((2055, 2117), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'chroma_vector_store'}), '(vector_store=chroma_vector_store)\n', (2083, 2117), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, ServiceContext\n'), ((2152, 2272), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'chroma_storage_context', 'service_context': 'service_context'}), '(documents, storage_context=\n chroma_storage_context, service_context=service_context)\n', (2183, 2272), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, ServiceContext\n'), ((2586, 2651), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'dynamic_storage_context'}), '(persist_dir=dynamic_storage_context)\n', (2614, 2651), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, 
ServiceContext\n'), ((2681, 2749), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'index_id': 'dynamic_vector_id'}), '(storage_context, index_id=dynamic_vector_id)\n', (2704, 2749), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, ServiceContext\n'), ((3192, 3257), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'dynamic_storage_context'}), '(persist_dir=dynamic_storage_context)\n', (3220, 3257), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, ServiceContext\n'), ((3287, 3387), 'llama_index.indices.loading.load_index_from_storage', 'llama_index.indices.loading.load_index_from_storage', (['storage_context'], {'index_id': 'dynamic_vector_id'}), '(storage_context,\n index_id=dynamic_vector_id)\n', (3338, 3387), False, 'import llama_index\n'), ((3417, 3518), 'llama_index.query_engine.CitationQueryEngine.from_args', 'CitationQueryEngine.from_args', (['index'], {'similarity_top_k': '(3)', 'citation_chunk_size': '(512)', 'streaming': '(True)'}), '(index, similarity_top_k=3,\n citation_chunk_size=512, streaming=True)\n', (3446, 3518), False, 'from llama_index.query_engine import CitationQueryEngine\n'), ((966, 985), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (983, 985), False, 'import logging\n'), ((1432, 1474), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': 'directory'}), '(input_dir=directory)\n', (1453, 1474), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, LLMPredictor, PromptHelper, ServiceContext\n')]
import streamlit as st import llama_index from llama_index import StorageContext, load_index_from_storage from llama_index.query_engine import RetrieverQueryEngine from llama_index.storage.docstore import SimpleDocumentStore from llama_index.vector_stores import SimpleVectorStore from llama_index.storage.index_store import SimpleIndexStore from llama_index import KeywordTableIndex from llama_index.indices.keyword_table import SimpleKeywordTableIndex from llama_index import ResponseSynthesizer from llama_index.indices.postprocessor import SimilarityPostprocessor from llama_index.retrievers import VectorIndexRetriever from llama_index.retrievers import ListIndexRetriever from llama_index.retrievers import TreeRootRetriever from llama_index.indices.keyword_table.retrievers import KeywordTableGPTRetriever from llama_index.indices.keyword_table import GPTSimpleKeywordTableIndex from llama_index.indices.keyword_table.retrievers import KeywordTableRAKERetriever from llama_index.indices.keyword_table.retrievers import KeywordTableSimpleRetriever from llama_index import Prompt from llama_index import LLMPredictor from langchain.chat_models import ChatOpenAI from llama_index import ServiceContext print("1") storage_context_1 = StorageContext.from_defaults( docstore=SimpleDocumentStore.from_persist_dir(persist_dir="vector_store"), vector_store=SimpleVectorStore.from_persist_dir(persist_dir="vector_store"), index_store=SimpleIndexStore.from_persist_dir(persist_dir="vector_store"), ) storage_context_2 = StorageContext.from_defaults( docstore=SimpleDocumentStore.from_persist_dir(persist_dir="table"), vector_store=SimpleVectorStore.from_persist_dir(persist_dir="table"), index_store=SimpleIndexStore.from_persist_dir(persist_dir="table"), ) storage_context_3 = StorageContext.from_defaults( docstore=SimpleDocumentStore.from_persist_dir(persist_dir="tree"), vector_store=SimpleVectorStore.from_persist_dir(persist_dir="tree"), index_store=SimpleIndexStore.from_persist_dir(persist_dir="tree"), ) storage_context_4 = StorageContext.from_defaults( docstore=SimpleDocumentStore.from_persist_dir(persist_dir="list"), vector_store=SimpleVectorStore.from_persist_dir(persist_dir="list"), index_store=SimpleIndexStore.from_persist_dir(persist_dir="list"), ) print("2") from llama_index import load_index_from_storage, load_indices_from_storage, load_graph_from_storage indices1 = load_index_from_storage(storage_context_1) indices2 = load_index_from_storage(storage_context_2) indices3 = load_index_from_storage(storage_context_3) indices4 = load_index_from_storage(storage_context_4) # indices1 = load_index_from_storage(storage_context="vector_store") index = [indices1, indices2, indices3, indices4] print("3") print("4") from llama_index.indices.response import BaseResponseBuilder # configure response synthesizer response_synthesizer = ResponseSynthesizer.from_args( # node_postprocessors=[ # ] ) print("5") TEMPLATE_STR = ( "We have provided context information below. 
\n" "---------------------\n" "{context_str}" "\n---------------------\n" "Given this information, please answer the question: {query_str}\n" ) QA_TEMPLATE = Prompt(TEMPLATE_STR) llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", streaming=True)) service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, chunk_size=1024) query_engine1 = indices3.as_query_engine(service_context=service_context, text_qa_template=QA_TEMPLATE, similarity_top_k=3, streaming=True, ) response = query_engine1.query('How much package has government of india announced?') # print("7") str(response) print(response) # response.source_nodes print(response.source_nodes) ########## working ##########
[ "llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir", "llama_index.ResponseSynthesizer.from_args", "llama_index.ServiceContext.from_defaults", "llama_index.storage.index_store.SimpleIndexStore.from_persist_dir", "llama_index.Prompt", "llama_index.load_index_from_storage", "llama_index.vector_stores.SimpleVectorStore.from_persist_dir" ]
[((2439, 2481), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context_1'], {}), '(storage_context_1)\n', (2462, 2481), False, 'from llama_index import load_index_from_storage, load_indices_from_storage, load_graph_from_storage\n'), ((2493, 2535), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context_2'], {}), '(storage_context_2)\n', (2516, 2535), False, 'from llama_index import load_index_from_storage, load_indices_from_storage, load_graph_from_storage\n'), ((2547, 2589), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context_3'], {}), '(storage_context_3)\n', (2570, 2589), False, 'from llama_index import load_index_from_storage, load_indices_from_storage, load_graph_from_storage\n'), ((2601, 2643), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context_4'], {}), '(storage_context_4)\n', (2624, 2643), False, 'from llama_index import load_index_from_storage, load_indices_from_storage, load_graph_from_storage\n'), ((2909, 2940), 'llama_index.ResponseSynthesizer.from_args', 'ResponseSynthesizer.from_args', ([], {}), '()\n', (2938, 2940), False, 'from llama_index import ResponseSynthesizer\n'), ((3238, 3258), 'llama_index.Prompt', 'Prompt', (['TEMPLATE_STR'], {}), '(TEMPLATE_STR)\n', (3244, 3258), False, 'from llama_index import Prompt\n'), ((3383, 3457), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'chunk_size': '(1024)'}), '(llm_predictor=llm_predictor, chunk_size=1024)\n', (3411, 3457), False, 'from llama_index import ServiceContext\n'), ((1282, 1346), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', ([], {'persist_dir': '"""vector_store"""'}), "(persist_dir='vector_store')\n", (1318, 1346), False, 'from llama_index.storage.docstore import SimpleDocumentStore\n'), ((1365, 1427), 'llama_index.vector_stores.SimpleVectorStore.from_persist_dir', 'SimpleVectorStore.from_persist_dir', ([], {'persist_dir': '"""vector_store"""'}), "(persist_dir='vector_store')\n", (1399, 1427), False, 'from llama_index.vector_stores import SimpleVectorStore\n'), ((1445, 1506), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', ([], {'persist_dir': '"""vector_store"""'}), "(persist_dir='vector_store')\n", (1478, 1506), False, 'from llama_index.storage.index_store import SimpleIndexStore\n'), ((1574, 1631), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', ([], {'persist_dir': '"""table"""'}), "(persist_dir='table')\n", (1610, 1631), False, 'from llama_index.storage.docstore import SimpleDocumentStore\n'), ((1650, 1705), 'llama_index.vector_stores.SimpleVectorStore.from_persist_dir', 'SimpleVectorStore.from_persist_dir', ([], {'persist_dir': '"""table"""'}), "(persist_dir='table')\n", (1684, 1705), False, 'from llama_index.vector_stores import SimpleVectorStore\n'), ((1723, 1777), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', ([], {'persist_dir': '"""table"""'}), "(persist_dir='table')\n", (1756, 1777), False, 'from llama_index.storage.index_store import SimpleIndexStore\n'), ((1844, 1900), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', ([], {'persist_dir': '"""tree"""'}), "(persist_dir='tree')\n", (1880, 1900), False, 'from 
llama_index.storage.docstore import SimpleDocumentStore\n'), ((1919, 1973), 'llama_index.vector_stores.SimpleVectorStore.from_persist_dir', 'SimpleVectorStore.from_persist_dir', ([], {'persist_dir': '"""tree"""'}), "(persist_dir='tree')\n", (1953, 1973), False, 'from llama_index.vector_stores import SimpleVectorStore\n'), ((1991, 2044), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', ([], {'persist_dir': '"""tree"""'}), "(persist_dir='tree')\n", (2024, 2044), False, 'from llama_index.storage.index_store import SimpleIndexStore\n'), ((2111, 2167), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', ([], {'persist_dir': '"""list"""'}), "(persist_dir='list')\n", (2147, 2167), False, 'from llama_index.storage.docstore import SimpleDocumentStore\n'), ((2186, 2240), 'llama_index.vector_stores.SimpleVectorStore.from_persist_dir', 'SimpleVectorStore.from_persist_dir', ([], {'persist_dir': '"""list"""'}), "(persist_dir='list')\n", (2220, 2240), False, 'from llama_index.vector_stores import SimpleVectorStore\n'), ((2258, 2311), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', ([], {'persist_dir': '"""list"""'}), "(persist_dir='list')\n", (2291, 2311), False, 'from llama_index.storage.index_store import SimpleIndexStore\n'), ((3293, 3362), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'streaming': '(True)'}), "(temperature=0, model_name='gpt-3.5-turbo', streaming=True)\n", (3303, 3362), False, 'from langchain.chat_models import ChatOpenAI\n')]
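
The sample above reloads four persisted indexes (vector, table, tree, list) and queries one of them with a custom QA prompt. A more compact sketch of the same persist/reload/query cycle, assuming a legacy (pre-0.10) llama-index, an OpenAI key, and placeholder data and vector_store directories (illustrative assumptions only):

# Sketch: persist an index once, then reload it and query with a custom QA prompt.
from llama_index import (
    Prompt,
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    load_index_from_storage,
)

# Build and persist (run once); "data" and "vector_store" are placeholder paths.
documents = SimpleDirectoryReader("data").load_data()
VectorStoreIndex.from_documents(documents).storage_context.persist(persist_dir="vector_store")

# Reload from disk and attach the same QA template used above.
storage_context = StorageContext.from_defaults(persist_dir="vector_store")
index = load_index_from_storage(storage_context)
qa_template = Prompt(
    "We have provided context information below.\n"
    "---------------------\n"
    "{context_str}"
    "\n---------------------\n"
    "Given this information, please answer the question: {query_str}\n"
)
query_engine = index.as_query_engine(text_qa_template=qa_template, similarity_top_k=3)
print(query_engine.query("What does the document announce?"))
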
# https://www.youtube.com/watch?v=oDzWsynpOyI import logging import sys import os from dotenv import load_dotenv load_dotenv() from llama_index import ( VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, ServiceContext, Document, ) import json import llama_index from llama_index.llms import AzureOpenAI from llama_index.node_parser import ( SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes, ) from llama_index.text_splitter import SentenceSplitter from llama_index.embeddings import AzureOpenAIEmbedding, HuggingFaceEmbedding from llama_index.schema import MetadataMode from llama_index.postprocessor import ( MetadataReplacementPostProcessor, SimilarityPostprocessor, ) from llama_index import set_global_service_context from llama_index.llms.types import ChatMessage import chromadb from llama_index.vector_stores import ChromaVectorStore ### THE LLM api_key = os.getenv("AZURE_OPENAI_API_KEY") azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT") api_version = os.getenv("OPENAI_API_VERSION") llm = AzureOpenAI( engine="chat", temperature=0.1, api_key=api_key, azure_endpoint=azure_endpoint, api_version=api_version, ) embed_model = AzureOpenAIEmbedding( azure_deployment="embeddings", api_key=api_key, azure_endpoint=azure_endpoint, api_version=api_version, ) def _print_docs(docs): # inspect documents print("length of documents: ", str(len(docs))) print("-----") print(docs) print("-----Metadata-----") for doc in docs: print(doc.metadata) def _print_nodes(name, nodes): print("-----" + name + "-----") counter = 1 for node in nodes: print(f"-----Node {counter}") dict_node = dict(node) print(dict_node) counter += 1 print("-----") def _create_text_qa_template(): from llama_index.llms import ChatMessage, MessageRole from llama_index.prompts import ChatPromptTemplate # Text QA Prompt chat_text_qa_msgs = [ ChatMessage( role=MessageRole.SYSTEM, content=( "You are an helpful chat assistant. You are here to help the user.Answer must be in the original language." ), ), ChatMessage( role=MessageRole.USER, content=( "Context information is below.\n" "---------------------\n" "{context_str}\n" "---------------------\n" "Given the context information and not prior knowledge," "answer the question: {query_str}\n" ), ), ] text_qa_template = ChatPromptTemplate(chat_text_qa_msgs) return text_qa_template def _create_refine_template(): from llama_index.llms import ChatMessage, MessageRole from llama_index.prompts import ChatPromptTemplate # Refine Prompt chat_refine_msgs = [ ChatMessage( role=MessageRole.SYSTEM, content=("Always answer the question, even if the context isn't helpful."), ), ChatMessage( role=MessageRole.USER, content=( "We have the opportunity to refine the original answer " "(only if needed) with some more context below.\n" "------------\n" "{context_msg}\n" "------------\n" "Given the new context, refine the original answer to better " "answer the question: {query_str}. 
" "If the context isn't useful, output the original answer again.\n" "Original Answer: {existing_answer}" ), ), ] refine_template = ChatPromptTemplate(chat_refine_msgs) return refine_template def create_window_nodes(path="./sample-docs/"): # get the file documents = SimpleDirectoryReader(path).load_data() # _print_docs(documents) sentence_node_parser = SentenceWindowNodeParser.from_defaults( window_size=3, window_metadata_key="window", original_text_metadata_key="original_text", ) window_nodes = sentence_node_parser.get_nodes_from_documents(documents) # _print_nodes("WINDOW NODES", window_nodes) return window_nodes def create_base_nodes(path="./sample-docs/"): # get the file documents = SimpleDirectoryReader(path).load_data() # _print_docs(documents) base_node_parser = SentenceSplitter() base_nodes = base_node_parser.get_nodes_from_documents(documents) # _print_nodes("BASE NODES", base_nodes) return base_nodes def save_on_chroma_and_get_index(nodes, collection_name): ### CREATE THE VECTOR STORES ### SAVING VECTORS ON DISK db = chromadb.PersistentClient(path="./chroma_db") vector_collection = db.get_or_create_collection(collection_name) vector_store = ChromaVectorStore(chroma_collection=vector_collection) storage_context = StorageContext.from_defaults(vector_store=vector_store) ctx = ServiceContext.from_defaults( llm=llm, embed_model=embed_model, node_parser=nodes ) index = VectorStoreIndex( nodes, storage_context=storage_context, service_context=ctx ) return index def get_index(collection_name): db2 = chromadb.PersistentClient(path="./chroma_db") service_context = ServiceContext.from_defaults(embed_model=embed_model, llm=llm) collection = db2.get_or_create_collection(collection_name) vector_store = ChromaVectorStore(chroma_collection=collection) index = VectorStoreIndex.from_vector_store( vector_store, service_context=service_context, ) return index def run_window_index_sample(question): window_index = get_index("window-detrazioni") text_qa_template = _create_text_qa_template() refine_template = _create_refine_template() window_query_engine = window_index.as_query_engine( similarity_top_k=5, verbose=True, text_qa_template=text_qa_template, # refine_template=refine_template, node_postprocessor=MetadataReplacementPostProcessor( target_metadata_key="window", ) # node_postprocessors=[ # SimilarityPostprocessor(similarity_cutoff=0.7), # MetadataReplacementPostProcessor( # target_metadata_key="window", # ), # ], ) base_response = window_query_engine.query(question) print(base_response) def run_base_index_sample(question): base_index = get_index("base-detrazioni") text_qa_template = _create_text_qa_template() refine_template = _create_refine_template() # Query engine # base_query_engine = base_index.as_query_engine( # verbose=True, # text_qa_template=text_qa_template, # # refine_template=refine_template, # ) # chat engine base_query_engine = base_index.as_chat_engine() base_response = base_query_engine.chat(question) print(base_response) if __name__ == "__main__": logging.basicConfig(stream=sys.stdout, level=logging.INFO) logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout)) # windows_node = create_window_nodes() # window_index = save_on_chroma_and_get_index(windows_node, "window-detrazioni") ### INFERENCE question = "question!!!" 
# window_index = run_window_index_sample(question=question) base_index = run_base_index_sample(question=question) # ### TODO : TO INVESTIGATE # ### SAVING INDEX DEFINITION ON DISK # ### this is useful to avoid having to recreate the index every time so we can save money # ### from embedding calls # window_index.storage_context.persist(persist_dir="./window-indexes") # base_index.storage_context.persist(persist_dir="./base-indexes") # ### RELOAD INDEXES FROM DISK # SC_retrieved_window = storage_context_window.from_defaults( # persist_dir="./window-indexes" # ) # SC_retrieved_base = storage_context_base.from_defaults(persist_dir="./base-indexes") # retrieved_window_index = load_index_from_storage(SC_retrieved_window) # retrieved_base_index = load_index_from_storage(SC_retrieved_base)
[ "llama_index.llms.AzureOpenAI", "llama_index.postprocessor.MetadataReplacementPostProcessor", "llama_index.embeddings.AzureOpenAIEmbedding", "llama_index.ServiceContext.from_defaults", "llama_index.vector_stores.ChromaVectorStore", "llama_index.StorageContext.from_defaults", "llama_index.VectorStoreIndex", "llama_index.SimpleDirectoryReader", "llama_index.VectorStoreIndex.from_vector_store", "llama_index.llms.ChatMessage", "llama_index.prompts.ChatPromptTemplate", "llama_index.node_parser.SentenceWindowNodeParser.from_defaults", "llama_index.text_splitter.SentenceSplitter" ]
[((116, 129), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (127, 129), False, 'from dotenv import load_dotenv\n'), ((963, 996), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_API_KEY"""'], {}), "('AZURE_OPENAI_API_KEY')\n", (972, 996), False, 'import os\n'), ((1014, 1048), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_ENDPOINT"""'], {}), "('AZURE_OPENAI_ENDPOINT')\n", (1023, 1048), False, 'import os\n'), ((1063, 1094), 'os.getenv', 'os.getenv', (['"""OPENAI_API_VERSION"""'], {}), "('OPENAI_API_VERSION')\n", (1072, 1094), False, 'import os\n'), ((1102, 1223), 'llama_index.llms.AzureOpenAI', 'AzureOpenAI', ([], {'engine': '"""chat"""', 'temperature': '(0.1)', 'api_key': 'api_key', 'azure_endpoint': 'azure_endpoint', 'api_version': 'api_version'}), "(engine='chat', temperature=0.1, api_key=api_key, azure_endpoint\n =azure_endpoint, api_version=api_version)\n", (1113, 1223), False, 'from llama_index.llms import AzureOpenAI\n'), ((1258, 1386), 'llama_index.embeddings.AzureOpenAIEmbedding', 'AzureOpenAIEmbedding', ([], {'azure_deployment': '"""embeddings"""', 'api_key': 'api_key', 'azure_endpoint': 'azure_endpoint', 'api_version': 'api_version'}), "(azure_deployment='embeddings', api_key=api_key,\n azure_endpoint=azure_endpoint, api_version=api_version)\n", (1278, 1386), False, 'from llama_index.embeddings import AzureOpenAIEmbedding, HuggingFaceEmbedding\n'), ((2716, 2753), 'llama_index.prompts.ChatPromptTemplate', 'ChatPromptTemplate', (['chat_text_qa_msgs'], {}), '(chat_text_qa_msgs)\n', (2734, 2753), False, 'from llama_index.prompts import ChatPromptTemplate\n'), ((3774, 3810), 'llama_index.prompts.ChatPromptTemplate', 'ChatPromptTemplate', (['chat_refine_msgs'], {}), '(chat_refine_msgs)\n', (3792, 3810), False, 'from llama_index.prompts import ChatPromptTemplate\n'), ((4020, 4152), 'llama_index.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': '(3)', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""'}), "(window_size=3, window_metadata_key=\n 'window', original_text_metadata_key='original_text')\n", (4058, 4152), False, 'from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes\n'), ((4505, 4523), 'llama_index.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {}), '()\n', (4521, 4523), False, 'from llama_index.text_splitter import SentenceSplitter\n'), ((4796, 4841), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""./chroma_db"""'}), "(path='./chroma_db')\n", (4821, 4841), False, 'import chromadb\n'), ((4931, 4985), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'vector_collection'}), '(chroma_collection=vector_collection)\n', (4948, 4985), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((5008, 5063), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (5036, 5063), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, ServiceContext, Document\n'), ((5074, 5160), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'node_parser': 'nodes'}), '(llm=llm, embed_model=embed_model, node_parser=\n nodes)\n', (5102, 5160), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, 
load_index_from_storage, StorageContext, ServiceContext, Document\n'), ((5183, 5260), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'storage_context': 'storage_context', 'service_context': 'ctx'}), '(nodes, storage_context=storage_context, service_context=ctx)\n', (5199, 5260), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, ServiceContext, Document\n'), ((5337, 5382), 'chromadb.PersistentClient', 'chromadb.PersistentClient', ([], {'path': '"""./chroma_db"""'}), "(path='./chroma_db')\n", (5362, 5382), False, 'import chromadb\n'), ((5405, 5467), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'embed_model', 'llm': 'llm'}), '(embed_model=embed_model, llm=llm)\n', (5433, 5467), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, ServiceContext, Document\n'), ((5551, 5598), 'llama_index.vector_stores.ChromaVectorStore', 'ChromaVectorStore', ([], {'chroma_collection': 'collection'}), '(chroma_collection=collection)\n', (5568, 5598), False, 'from llama_index.vector_stores import ChromaVectorStore\n'), ((5611, 5697), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {'service_context': 'service_context'}), '(vector_store, service_context=\n service_context)\n', (5645, 5697), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, ServiceContext, Document\n'), ((7100, 7158), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (7119, 7158), False, 'import logging\n'), ((2067, 2230), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.SYSTEM', 'content': '"""You are an helpful chat assistant. You are here to help the user.Answer must be in the original language."""'}), "(role=MessageRole.SYSTEM, content=\n 'You are an helpful chat assistant. 
You are here to help the user.Answer must be in the original language.'\n )\n", (2078, 2230), False, 'from llama_index.llms import ChatMessage, MessageRole\n'), ((2297, 2532), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': '"""Context information is below.\n---------------------\n{context_str}\n---------------------\nGiven the context information and not prior knowledge,answer the question: {query_str}\n"""'}), '(role=MessageRole.USER, content=\n """Context information is below.\n---------------------\n{context_str}\n---------------------\nGiven the context information and not prior knowledge,answer the question: {query_str}\n"""\n )\n', (2308, 2532), False, 'from llama_index.llms import ChatMessage, MessageRole\n'), ((2984, 3099), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.SYSTEM', 'content': '"""Always answer the question, even if the context isn\'t helpful."""'}), '(role=MessageRole.SYSTEM, content=\n "Always answer the question, even if the context isn\'t helpful.")\n', (2995, 3099), False, 'from llama_index.llms import ChatMessage, MessageRole\n'), ((3141, 3533), 'llama_index.llms.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER', 'content': '"""We have the opportunity to refine the original answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nGiven the new context, refine the original answer to better answer the question: {query_str}. If the context isn\'t useful, output the original answer again.\nOriginal Answer: {existing_answer}"""'}), '(role=MessageRole.USER, content=\n """We have the opportunity to refine the original answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nGiven the new context, refine the original answer to better answer the question: {query_str}. If the context isn\'t useful, output the original answer again.\nOriginal Answer: {existing_answer}"""\n )\n', (3152, 3533), False, 'from llama_index.llms import ChatMessage, MessageRole\n'), ((7194, 7234), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (7215, 7234), False, 'import logging\n'), ((3923, 3950), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['path'], {}), '(path)\n', (3944, 3950), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, ServiceContext, Document\n'), ((4412, 4439), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['path'], {}), '(path)\n', (4433, 4439), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, StorageContext, ServiceContext, Document\n'), ((6143, 6205), 'llama_index.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (6175, 6205), False, 'from llama_index.postprocessor import MetadataReplacementPostProcessor, SimilarityPostprocessor\n'), ((7163, 7182), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (7180, 7182), False, 'import logging\n')]
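
The sample above wires Azure OpenAI chat and embedding deployments to a Chroma-backed index built from sentence-window nodes. A minimal sketch of the window-retrieval pattern with metadata replacement, assuming a legacy (pre-0.10) llama-index and default OpenAI models instead of the Azure deployments; the query engine takes the plural node_postprocessors keyword (a list), which the sketch uses:

# Sketch: sentence-window parsing, then swap each retrieved sentence for its
# surrounding window at query time via MetadataReplacementPostProcessor.
from llama_index import Document, ServiceContext, VectorStoreIndex
from llama_index.node_parser import SentenceWindowNodeParser
from llama_index.postprocessor import MetadataReplacementPostProcessor

parser = SentenceWindowNodeParser.from_defaults(
    window_size=3,
    window_metadata_key="window",
    original_text_metadata_key="original_text",
)
nodes = parser.get_nodes_from_documents(
    [Document(text="First sentence. Second sentence. Third sentence. Fourth sentence.")]
)

# Default ServiceContext uses OpenAI models and expects OPENAI_API_KEY to be set.
service_context = ServiceContext.from_defaults()
index = VectorStoreIndex(nodes, service_context=service_context)

query_engine = index.as_query_engine(
    similarity_top_k=2,
    node_postprocessors=[MetadataReplacementPostProcessor(target_metadata_key="window")],
)
print(query_engine.query("What does the text say?"))
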
from importlib import metadata from pathlib import WindowsPath from re import sub from llama_index import ( ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage, global_service_context, ) import llama_index from llama_index.embeddings import HuggingFaceEmbedding from llama_index.schema import TextNode, MetadataMode from llama_index.vector_stores import MilvusVectorStore from llama_index.readers import SimpleDirectoryReader from llama_index.node_parser import SentenceWindowNodeParser, SentenceSplitter, SimpleNodeParser from llama_index.postprocessor import ( SimilarityPostprocessor, MetadataReplacementPostProcessor, ) from milvus import default_server import os from typing import List, Dict, Any, Optional from chatbot.common import DATA_PATH, EMBEDDING_DIM, EMBEDDING_MODEL, SIMILARITY_SEARCH_THRESHOLD, path_leaf, subjects, PathSep, debug class AugmentedIngestPipeline: def __init__( self, data_dir_path: str, service_context: ServiceContext, ) -> None: self.data_dir = data_dir_path self.service_ctx = service_context self.embed_model = self.service_ctx.embed_model self.vector_indexes = {} self.metadata_fn = lambda x: {"title": x.replace("_", " ")} self.node_parser = SentenceWindowNodeParser.from_defaults( window_size=3, window_metadata_key="window", original_text_metadata_key="original_text", include_metadata=True, ) self.create = False def _load_data(self, path): docs = SimpleDirectoryReader( path, file_metadata=self.metadata_fn, filename_as_id=True ).load_data() return docs def _make_nodes(self, docs): nodes = self.node_parser.get_nodes_from_documents(docs, show_progress=debug) return nodes def _insert_into_vectorstore(self, subject, nodes, create=False): collection_name = f"augmentED_{subject}" vector_store = MilvusVectorStore( dim=EMBEDDING_DIM, host="127.0.0.1", port=default_server.listen_port, collection_name=collection_name, overwrite=create, ) storage_ctx = StorageContext.from_defaults(vector_store=vector_store) self.vector_indexes[subject] = VectorStoreIndex( nodes=nodes, service_context=self.service_ctx, storage_context=storage_ctx, ) def _load_vectorstore(self, subject): collection_name = f"augmentED_{subject}" vector_store = MilvusVectorStore( dim=EMBEDDING_DIM, host="127.0.0.1", port=default_server.listen_port, collection_name=collection_name, overwrite=False ) storage_ctx = StorageContext.from_defaults(vector_store=vector_store) self.vector_indexes[subject] = VectorStoreIndex.from_vector_store( vector_store=vector_store, service_context=self.service_ctx, storage_context=storage_ctx, ) def _get_subject_query_engine(self, subject): query_engine = self.vector_indexes[subject].as_query_engine( similarity_top_k=3, node_postprocessors=[ SimilarityPostprocessor(similarity_cutoff=SIMILARITY_SEARCH_THRESHOLD), MetadataReplacementPostProcessor(target_metadata_key="window") ], ) return query_engine def _get_subject_chat_engine(self, subject): query_engine = self.vector_indexes[subject].as_chat_engine( mode="context", similarity_top_k=2, node_postprocessors=[ SimilarityPostprocessor(similarity_cutoff=SIMILARITY_SEARCH_THRESHOLD), MetadataReplacementPostProcessor(target_metadata_key="window") ], ) return query_engine def run_pipeline(self, create=False): self.create = create if self.create: self.one_giant_index_nodes = [] self.all_docs = [] for subject in subjects: path = self.data_dir + PathSep + subjects[subject] docs = self._load_data(path) nodes = self._make_nodes(docs) self._insert_into_vectorstore(subject=subject, nodes=nodes) 
self.one_giant_index_nodes.extend(nodes) self.all_docs.extend(docs) self._insert_into_vectorstore( subject="OneGiantIndex", nodes=self.one_giant_index_nodes, create=self.create ) else: for subject in subjects: self._load_vectorstore(subject) self._load_vectorstore("OneGiantIndex") self.one_giant_index = self.vector_indexes["OneGiantIndex"] self.query_everything = self._get_subject_query_engine("OneGiantIndex") def search_one_giant_index( self, query, top_k=10, replace_with_meta=True, metadata_key="title", ): retr = self.one_giant_index.as_retriever( similarity_top_k=top_k, ) answers = retr.retrieve(query) if replace_with_meta: return list(set(map(lambda x: x.metadata[metadata_key], answers))) else: return list( map(lambda x: x.get_content(metadata_mode=MetadataMode.LLM), answers) ) def query_one_file(self,file_path): docs = SimpleDirectoryReader( input_files=[file_path], file_metadata=self.metadata_fn, filename_as_id=True ).load_data() nodes = self._make_nodes(docs) self._insert_into_vectorstore("UserUploadedDocument", nodes) self._insert_into_vectorstore("OneGiantIndex", nodes) return self._get_subject_query_engine("UserUploadedDocument") class SimpleIngestPipeline: def __init__( self, data_dir_path: str, service_context: ServiceContext, create=False ) -> None: self.data_dir = data_dir_path self.service_ctx = service_context self.embed_model = self.service_ctx.embed_model self.vector_indexes = {} self.metadata_fn = lambda x: {"title": path_leaf(x)} self.node_parser = SimpleNodeParser(chunk_size=512) self.create = create def _load_data(self, path): docs = SimpleDirectoryReader( path, file_metadata=self.metadata_fn, filename_as_id=True ).load_data() return docs def _make_nodes(self, docs): nodes = self.node_parser.get_nodes_from_documents(docs, show_progress=debug) return nodes def _insert_into_vectorstore(self, subject, nodes, create=False): collection_name = f"augmentED_{subject}" vector_store = MilvusVectorStore( dim=EMBEDDING_DIM, host="127.0.0.1", port=default_server.listen_port, collection_name=collection_name, overwrite=create, ) storage_ctx = StorageContext.from_defaults(vector_store=vector_store) self.vector_indexes[subject] = VectorStoreIndex( nodes=nodes, service_context=self.service_ctx, storage_context=storage_ctx, ) def _load_vectorstore(self, subject): collection_name = f"augmentED_{subject}" vector_store = MilvusVectorStore( dim=EMBEDDING_DIM, host="127.0.0.1", port=default_server.listen_port, collection_name=collection_name, overwrite=False ) storage_ctx = StorageContext.from_defaults(vector_store=vector_store) self.vector_indexes[subject] = VectorStoreIndex.from_vector_store( vector_store=vector_store, service_context=self.service_ctx, storage_context=storage_ctx, ) def _get_subject_query_engine(self, subject) -> Dict: query_engine = self.vector_indexes[subject].as_query_engine( similarity_top_k=3, node_postprocessors=[ MetadataReplacementPostProcessor(target_metadata_key="window") ], ) return query_engine def run_pipeline(self): if self.create: self.one_giant_index_nodes = [] self.all_docs = [] for subject in subjects: path = self.data_dir + PathSep + subjects[subject] docs = self._load_data(path) nodes = self._make_nodes(docs) self._insert_into_vectorstore(subject=subject, nodes=nodes) self.one_giant_index_nodes.extend(nodes) self.all_docs.extend(docs) self._insert_into_vectorstore( subject="OneGiantIndex", nodes=self.one_giant_index_nodes, create=self.create ) else: for subject in subjects: self._load_vectorstore(subject) self._load_vectorstore("OneGiantIndex") 
self.one_giant_index = self.vector_indexes["OneGiantIndex"] if __name__ == "__main__": pipe = AugmentedIngestPipeline( data_dir_path=DATA_PATH, service_context=ServiceContext.from_defaults( llm=None, embed_model=HuggingFaceEmbedding(EMBEDDING_MODEL) ), ) pipe.run_pipeline(create=True)
[ "llama_index.postprocessor.MetadataReplacementPostProcessor", "llama_index.postprocessor.SimilarityPostprocessor", "llama_index.node_parser.SimpleNodeParser", "llama_index.StorageContext.from_defaults", "llama_index.VectorStoreIndex", "llama_index.readers.SimpleDirectoryReader", "llama_index.VectorStoreIndex.from_vector_store", "llama_index.vector_stores.MilvusVectorStore", "llama_index.embeddings.HuggingFaceEmbedding", "llama_index.node_parser.SentenceWindowNodeParser.from_defaults" ]
[((1300, 1460), 'llama_index.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': '(3)', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""', 'include_metadata': '(True)'}), "(window_size=3, window_metadata_key=\n 'window', original_text_metadata_key='original_text', include_metadata=True\n )\n", (1338, 1460), False, 'from llama_index.node_parser import SentenceWindowNodeParser, SentenceSplitter, SimpleNodeParser\n'), ((2004, 2147), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'dim': 'EMBEDDING_DIM', 'host': '"""127.0.0.1"""', 'port': 'default_server.listen_port', 'collection_name': 'collection_name', 'overwrite': 'create'}), "(dim=EMBEDDING_DIM, host='127.0.0.1', port=default_server.\n listen_port, collection_name=collection_name, overwrite=create)\n", (2021, 2147), False, 'from llama_index.vector_stores import MilvusVectorStore\n'), ((2237, 2292), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (2265, 2292), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage, global_service_context\n'), ((2333, 2429), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', ([], {'nodes': 'nodes', 'service_context': 'self.service_ctx', 'storage_context': 'storage_ctx'}), '(nodes=nodes, service_context=self.service_ctx,\n storage_context=storage_ctx)\n', (2349, 2429), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage, global_service_context\n'), ((2588, 2730), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'dim': 'EMBEDDING_DIM', 'host': '"""127.0.0.1"""', 'port': 'default_server.listen_port', 'collection_name': 'collection_name', 'overwrite': '(False)'}), "(dim=EMBEDDING_DIM, host='127.0.0.1', port=default_server.\n listen_port, collection_name=collection_name, overwrite=False)\n", (2605, 2730), False, 'from llama_index.vector_stores import MilvusVectorStore\n'), ((2819, 2874), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (2847, 2874), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage, global_service_context\n'), ((2915, 3043), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'self.service_ctx', 'storage_context': 'storage_ctx'}), '(vector_store=vector_store,\n service_context=self.service_ctx, storage_context=storage_ctx)\n', (2949, 3043), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage, global_service_context\n'), ((6295, 6327), 'llama_index.node_parser.SimpleNodeParser', 'SimpleNodeParser', ([], {'chunk_size': '(512)'}), '(chunk_size=512)\n', (6311, 6327), False, 'from llama_index.node_parser import SentenceWindowNodeParser, SentenceSplitter, SimpleNodeParser\n'), ((6823, 6966), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'dim': 'EMBEDDING_DIM', 'host': '"""127.0.0.1"""', 'port': 'default_server.listen_port', 'collection_name': 'collection_name', 'overwrite': 'create'}), "(dim=EMBEDDING_DIM, host='127.0.0.1', port=default_server.\n listen_port, collection_name=collection_name, overwrite=create)\n", (6840, 
6966), False, 'from llama_index.vector_stores import MilvusVectorStore\n'), ((7056, 7111), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (7084, 7111), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage, global_service_context\n'), ((7152, 7248), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', ([], {'nodes': 'nodes', 'service_context': 'self.service_ctx', 'storage_context': 'storage_ctx'}), '(nodes=nodes, service_context=self.service_ctx,\n storage_context=storage_ctx)\n', (7168, 7248), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage, global_service_context\n'), ((7407, 7549), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'dim': 'EMBEDDING_DIM', 'host': '"""127.0.0.1"""', 'port': 'default_server.listen_port', 'collection_name': 'collection_name', 'overwrite': '(False)'}), "(dim=EMBEDDING_DIM, host='127.0.0.1', port=default_server.\n listen_port, collection_name=collection_name, overwrite=False)\n", (7424, 7549), False, 'from llama_index.vector_stores import MilvusVectorStore\n'), ((7638, 7693), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (7666, 7693), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage, global_service_context\n'), ((7734, 7862), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'self.service_ctx', 'storage_context': 'storage_ctx'}), '(vector_store=vector_store,\n service_context=self.service_ctx, storage_context=storage_ctx)\n', (7768, 7862), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage, global_service_context\n'), ((1586, 1671), 'llama_index.readers.SimpleDirectoryReader', 'SimpleDirectoryReader', (['path'], {'file_metadata': 'self.metadata_fn', 'filename_as_id': '(True)'}), '(path, file_metadata=self.metadata_fn, filename_as_id=True\n )\n', (1607, 1671), False, 'from llama_index.readers import SimpleDirectoryReader\n'), ((5516, 5620), 'llama_index.readers.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[file_path]', 'file_metadata': 'self.metadata_fn', 'filename_as_id': '(True)'}), '(input_files=[file_path], file_metadata=self.\n metadata_fn, filename_as_id=True)\n', (5537, 5620), False, 'from llama_index.readers import SimpleDirectoryReader\n'), ((6254, 6266), 'chatbot.common.path_leaf', 'path_leaf', (['x'], {}), '(x)\n', (6263, 6266), False, 'from chatbot.common import DATA_PATH, EMBEDDING_DIM, EMBEDDING_MODEL, SIMILARITY_SEARCH_THRESHOLD, path_leaf, subjects, PathSep, debug\n'), ((6405, 6490), 'llama_index.readers.SimpleDirectoryReader', 'SimpleDirectoryReader', (['path'], {'file_metadata': 'self.metadata_fn', 'filename_as_id': '(True)'}), '(path, file_metadata=self.metadata_fn, filename_as_id=True\n )\n', (6426, 6490), False, 'from llama_index.readers import SimpleDirectoryReader\n'), ((3289, 3359), 'llama_index.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': 'SIMILARITY_SEARCH_THRESHOLD'}), '(similarity_cutoff=SIMILARITY_SEARCH_THRESHOLD)\n', (3312, 3359), False, 'from llama_index.postprocessor import SimilarityPostprocessor, 
MetadataReplacementPostProcessor\n'), ((3377, 3439), 'llama_index.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (3409, 3439), False, 'from llama_index.postprocessor import SimilarityPostprocessor, MetadataReplacementPostProcessor\n'), ((3721, 3791), 'llama_index.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': 'SIMILARITY_SEARCH_THRESHOLD'}), '(similarity_cutoff=SIMILARITY_SEARCH_THRESHOLD)\n', (3744, 3791), False, 'from llama_index.postprocessor import SimilarityPostprocessor, MetadataReplacementPostProcessor\n'), ((3809, 3871), 'llama_index.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (3841, 3871), False, 'from llama_index.postprocessor import SimilarityPostprocessor, MetadataReplacementPostProcessor\n'), ((8117, 8179), 'llama_index.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (8149, 8179), False, 'from llama_index.postprocessor import SimilarityPostprocessor, MetadataReplacementPostProcessor\n'), ((9294, 9331), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', (['EMBEDDING_MODEL'], {}), '(EMBEDDING_MODEL)\n', (9314, 9331), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n')]
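
The pipeline above keeps one Milvus collection per subject (plus a combined OneGiantIndex) and retrieves with similarity and window-replacement post-processing. A minimal sketch of the Milvus-backed index it builds, assuming a legacy (pre-0.10) llama-index, a Milvus server reachable on the default local port, and a 384-dimension MiniLM embedding model (all illustrative assumptions):

# Sketch: embed locally with a HuggingFace model and store the vectors in Milvus.
from llama_index import Document, ServiceContext, StorageContext, VectorStoreIndex
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index.vector_stores import MilvusVectorStore

embed_model = HuggingFaceEmbedding("sentence-transformers/all-MiniLM-L6-v2")  # 384-dim embeddings
service_context = ServiceContext.from_defaults(llm=None, embed_model=embed_model)

vector_store = MilvusVectorStore(
    dim=384,            # must match the embedding dimension
    host="127.0.0.1",
    port=19530,         # default Milvus port; the pipeline above uses default_server.listen_port
    collection_name="augmentED_demo",
    overwrite=True,
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)

index = VectorStoreIndex.from_documents(
    [Document(text="augmentED stores one Milvus collection per subject.")],
    service_context=service_context,
    storage_context=storage_context,
)
print(index.as_retriever(similarity_top_k=1).retrieve("How are subjects stored?"))
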
get_ipython().run_line_magic('pip', 'install llama-hub-llama-packs-agents-llm-compiler-step') get_ipython().run_line_magic('pip', 'install llama-index-readers-wikipedia') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import phoenix as px px.launch_app() import llama_index.core llama_index.core.set_global_handler("arize_phoenix") import nest_asyncio nest_asyncio.apply() from llama_index.packs.agents.llm_compiler.step import LLMCompilerAgentWorker from llama_index.core.llama_pack import download_llama_pack download_llama_pack( "LLMCompilerAgentPack", "./agent_pack", skip_load=True, ) from agent_pack.step import LLMCompilerAgentWorker import json from typing import Sequence, List from llama_index.llms.openai import OpenAI from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool, FunctionTool import nest_asyncio nest_asyncio.apply() def multiply(a: int, b: int) -> int: """Multiple two integers and returns the result integer""" return a * b multiply_tool = FunctionTool.from_defaults(fn=multiply) def add(a: int, b: int) -> int: """Add two integers and returns the result integer""" return a + b add_tool = FunctionTool.from_defaults(fn=add) tools = [multiply_tool, add_tool] multiply_tool.metadata.fn_schema_str from llama_index.core.agent import AgentRunner llm = OpenAI(model="gpt-4") callback_manager = llm.callback_manager agent_worker = LLMCompilerAgentWorker.from_tools( tools, llm=llm, verbose=True, callback_manager=callback_manager ) agent = AgentRunner(agent_worker, callback_manager=callback_manager) response = agent.chat("What is (121 * 3) + 42?") response agent.memory.get_all() get_ipython().system('pip install llama-index-readers-wikipedia') from llama_index.readers.wikipedia import WikipediaReader wiki_titles = ["Toronto", "Seattle", "Chicago", "Boston", "Miami"] city_docs = {} reader = WikipediaReader() for wiki_title in wiki_titles: docs = reader.load_data(pages=[wiki_title]) city_docs[wiki_title] = docs from llama_index.core import ServiceContext from llama_index.llms.openai import OpenAI from llama_index.core.callbacks import CallbackManager llm = OpenAI(temperature=0, model="gpt-4") service_context = ServiceContext.from_defaults(llm=llm) callback_manager = CallbackManager([]) from llama_index.core import load_index_from_storage, StorageContext from llama_index.core.node_parser import SentenceSplitter from llama_index.core.tools import QueryEngineTool, ToolMetadata from llama_index.core import VectorStoreIndex import os node_parser = SentenceSplitter() query_engine_tools = [] for idx, wiki_title in enumerate(wiki_titles): nodes = node_parser.get_nodes_from_documents(city_docs[wiki_title]) if not os.path.exists(f"./data/{wiki_title}"): vector_index = VectorStoreIndex( nodes, service_context=service_context, callback_manager=callback_manager ) vector_index.storage_context.persist(persist_dir=f"./data/{wiki_title}") else: vector_index = load_index_from_storage( StorageContext.from_defaults(persist_dir=f"./data/{wiki_title}"), service_context=service_context, callback_manager=callback_manager, ) vector_query_engine = vector_index.as_query_engine() query_engine_tools.append( QueryEngineTool( query_engine=vector_query_engine, metadata=ToolMetadata( name=f"vector_tool_{wiki_title}", description=( "Useful for questions related to specific aspects of" f" {wiki_title} (e.g. the history, arts and culture," " sports, demographics, or more)." 
), ), ) ) from llama_index.core.agent import AgentRunner from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-4") agent_worker = LLMCompilerAgentWorker.from_tools( query_engine_tools, llm=llm, verbose=True, callback_manager=callback_manager, ) agent = AgentRunner(agent_worker, callback_manager=callback_manager) response = agent.chat( "Tell me about the demographics of Miami, and compare that with the demographics of Chicago?" ) print(str(response)) response = agent.chat( "Is the climate of Chicago or Seattle better during the wintertime?" ) print(str(response))
[ "llama_index.core.StorageContext.from_defaults", "llama_index.core.callbacks.CallbackManager", "llama_index.core.ServiceContext.from_defaults", "llama_index.core.node_parser.SentenceSplitter", "llama_index.core.VectorStoreIndex", "llama_index.core.tools.FunctionTool.from_defaults", "llama_index.core.llama_pack.download_llama_pack", "llama_index.llms.openai.OpenAI", "llama_index.readers.wikipedia.WikipediaReader", "llama_index.core.tools.ToolMetadata", "llama_index.core.agent.AgentRunner" ]
[((266, 281), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (279, 281), True, 'import phoenix as px\n'), ((385, 405), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (403, 405), False, 'import nest_asyncio\n'), ((549, 624), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""LLMCompilerAgentPack"""', '"""./agent_pack"""'], {'skip_load': '(True)'}), "('LLMCompilerAgentPack', './agent_pack', skip_load=True)\n", (568, 624), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((910, 930), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (928, 930), False, 'import nest_asyncio\n'), ((1069, 1108), 'llama_index.core.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'multiply'}), '(fn=multiply)\n', (1095, 1108), False, 'from llama_index.core.tools import BaseTool, FunctionTool\n'), ((1231, 1265), 'llama_index.core.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'add'}), '(fn=add)\n', (1257, 1265), False, 'from llama_index.core.tools import BaseTool, FunctionTool\n'), ((1398, 1419), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4"""'}), "(model='gpt-4')\n", (1404, 1419), False, 'from llama_index.llms.openai import OpenAI\n'), ((1479, 1581), 'agent_pack.step.LLMCompilerAgentWorker.from_tools', 'LLMCompilerAgentWorker.from_tools', (['tools'], {'llm': 'llm', 'verbose': '(True)', 'callback_manager': 'callback_manager'}), '(tools, llm=llm, verbose=True,\n callback_manager=callback_manager)\n', (1512, 1581), False, 'from agent_pack.step import LLMCompilerAgentWorker\n'), ((1592, 1652), 'llama_index.core.agent.AgentRunner', 'AgentRunner', (['agent_worker'], {'callback_manager': 'callback_manager'}), '(agent_worker, callback_manager=callback_manager)\n', (1603, 1652), False, 'from llama_index.core.agent import AgentRunner\n'), ((1966, 1983), 'llama_index.readers.wikipedia.WikipediaReader', 'WikipediaReader', ([], {}), '()\n', (1981, 1983), False, 'from llama_index.readers.wikipedia import WikipediaReader\n'), ((2248, 2284), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-4"""'}), "(temperature=0, model='gpt-4')\n", (2254, 2284), False, 'from llama_index.llms.openai import OpenAI\n'), ((2303, 2340), 'llama_index.core.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (2331, 2340), False, 'from llama_index.core import ServiceContext\n'), ((2360, 2379), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (2375, 2379), False, 'from llama_index.core.callbacks import CallbackManager\n'), ((2646, 2664), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {}), '()\n', (2662, 2664), False, 'from llama_index.core.node_parser import SentenceSplitter\n'), ((3946, 3967), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4"""'}), "(model='gpt-4')\n", (3952, 3967), False, 'from llama_index.llms.openai import OpenAI\n'), ((3983, 4098), 'agent_pack.step.LLMCompilerAgentWorker.from_tools', 'LLMCompilerAgentWorker.from_tools', (['query_engine_tools'], {'llm': 'llm', 'verbose': '(True)', 'callback_manager': 'callback_manager'}), '(query_engine_tools, llm=llm, verbose=True,\n callback_manager=callback_manager)\n', (4016, 4098), False, 'from agent_pack.step import LLMCompilerAgentWorker\n'), ((4122, 4182), 'llama_index.core.agent.AgentRunner', 'AgentRunner', (['agent_worker'], {'callback_manager': 
'callback_manager'}), '(agent_worker, callback_manager=callback_manager)\n', (4133, 4182), False, 'from llama_index.core.agent import AgentRunner\n'), ((2822, 2860), 'os.path.exists', 'os.path.exists', (['f"""./data/{wiki_title}"""'], {}), "(f'./data/{wiki_title}')\n", (2836, 2860), False, 'import os\n'), ((2885, 2981), 'llama_index.core.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {'service_context': 'service_context', 'callback_manager': 'callback_manager'}), '(nodes, service_context=service_context, callback_manager=\n callback_manager)\n', (2901, 2981), False, 'from llama_index.core import VectorStoreIndex\n'), ((3150, 3214), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'f"""./data/{wiki_title}"""'}), "(persist_dir=f'./data/{wiki_title}')\n", (3178, 3214), False, 'from llama_index.core import load_index_from_storage, StorageContext\n'), ((3499, 3705), 'llama_index.core.tools.ToolMetadata', 'ToolMetadata', ([], {'name': 'f"""vector_tool_{wiki_title}"""', 'description': 'f"""Useful for questions related to specific aspects of {wiki_title} (e.g. the history, arts and culture, sports, demographics, or more)."""'}), "(name=f'vector_tool_{wiki_title}', description=\n f'Useful for questions related to specific aspects of {wiki_title} (e.g. the history, arts and culture, sports, demographics, or more).'\n )\n", (3511, 3705), False, 'from llama_index.core.tools import QueryEngineTool, ToolMetadata\n')]
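
The notebook above drives calculator tools and per-city Wikipedia query engines through the downloaded LLMCompiler agent worker. A minimal sketch of the same tool-calling loop using the stock ReAct agent from llama-index core instead of the pack's worker (a deliberate substitution), assuming llama-index >= 0.10 and an OpenAI key:

# Sketch: the calculator tools from the notebook, driven by the built-in ReAct agent
# rather than the downloaded LLMCompilerAgentWorker.
from llama_index.core.agent import ReActAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.openai import OpenAI


def multiply(a: int, b: int) -> int:
    """Multiply two integers and return the result."""
    return a * b


def add(a: int, b: int) -> int:
    """Add two integers and return the result."""
    return a + b


llm = OpenAI(model="gpt-4")
agent = ReActAgent.from_tools(
    [FunctionTool.from_defaults(fn=multiply), FunctionTool.from_defaults(fn=add)],
    llm=llm,
    verbose=True,
)
print(agent.chat("What is (121 * 3) + 42?"))
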
import os import hashlib from threading import Thread from pathlib import Path #import llama_index from openai import OpenAI import constants as c from llama_index import StorageContext, VectorStoreIndex, Document from llama_index.node_parser import SimpleNodeParser from llama_index import SimpleDirectoryReader c.Get_API() client = OpenAI() newdocspath = "" masterpath = "" basepath = "" persistpath = "" indexpath = "" class Document: __slots__ = ['text', 'doc_id', 'id_', 'hash'] def __init__(self, text: str, doc_id: str): self.text = text self.doc_id = doc_id self.id_ = doc_id self.hash = self.generate_hash(text) def generate_hash(self, text: str) -> str: return hashlib.sha256(text.encode()).hexdigest() def get_metadata_str(self, mode=None) -> str: return f"{self.doc_id}-{self.hash}" def get_content(self, metadata_mode=None) -> str: return self.text def index_document(doc: Document): print("index_document reached") index = VectorStoreIndex() index.add_document(doc) print("index doscument complete") def CreateUpdate_Index(basepath, masterdocs, newdocs, indexpath, action, tool ): print('Create/Update function running') # Check if index path directory is empty main_dir = "." indexes_dir = os.path.join(main_dir, "Indexes") chkindexpath = os.path.join(indexes_dir, tool) print('ckindexpath', chkindexpath) index_dir = Path(chkindexpath) print('index_dir',index_dir) is_empty =len(os.listdir(index_dir)) == 0 print('is empty', is_empty) if is_empty: print('Running creating index function') print(basepath, masterdocs, newdocs, index_dir, tool) Create_Index(basepath, masterdocs, newdocs, index_dir, tool ) else: print('Running updating index function') Update_Index(basepath, masterdocs, newdocs, index_dir) # print('Running creating index function') # print(basepath, masterdocs, newdocs, index_dir, tool) # Create_Index(basepath, masterdocs, newdocs, index_dir, tool ) def Create_Index(basepath: str, masterdocs: str, newdocs: str, indexpath: str, tool: str): print('Creating index') # Load documents docpath = masterdocs documents = SimpleDirectoryReader(input_dir=docpath).load_data() # Parse documents into nodes parser = SimpleNodeParser.from_defaults() nodes = parser.get_nodes_from_documents(documents) # Create index using nodes index = VectorStoreIndex(nodes=nodes) for doc in documents: index.insert(doc) # Persist index persist_path = os.path.join(basepath, indexpath) print('persist_path= ', persist_path) saveindexpath = persist_path index.storage_context.persist(saveindexpath) print('Index created and saved') # def Update_Index(basepath: str, masterdocs: str, newdocs: str, indexpath: str): # print("update index reached") # from llama_index import load_index_from_storage, Document # print('update_index indexpath', indexpath) # # try: # storage_context = StorageContext.from_defaults(persist_dir=indexpath) # new_index = load_index_from_storage(storage_context) # new_docs_dir = os.path.join(basepath, newdocs) # is_empty = len(os.listdir(newdocs)) == 0 # if not is_empty: # for filename in os.listdir(new_docs_dir): # path = os.path.join(new_docs_dir, filename) # with open(path) as f: # # Create document # text = f.read() # doc = Document(text, filename) # new_index.insert(doc) # storage_context.persist(new_index) # print("Update index completed") # except Exception as e: # print(e) def Update_Index(basepath: str, masterdocs: str, newdocs: str, indexpath: str): # Loading from disk from llama_index import StorageContext, load_index_from_storage from llama_index import PromptHelper, LLMPredictor, ServiceContext 
import openai openai.api_key = c.Get_API() is_empty =len(os.listdir(newdocs)) == 0 if not is_empty: storage_context = StorageContext.from_defaults(persist_dir=indexpath) index = load_index_from_storage(storage_context) new_docs_dir = os.path.join(basepath, newdocs) llm_predictor =LLMPredictor(llm=openai) max_input_size = 4096 num_outputs = 5000 max_chunk_overlap = 0.5 chunk_size_limit = 3900 prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit) service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper) reader = SimpleDirectoryReader(new_docs_dir) documents = reader.load_data() persist_path = persist_path = os.path.join(basepath, indexpath) for d in documents: index.insert(document = d, service_context = service_context) print(persist_path) storage_context.persist(persist_dir = persist_path) else: print('no new docs') def AskBuild(tool, choice): print("AskBuild reached : ", tool, choice) if choice == 'build': print("Askbuild build reached") main_dir = "." #train_dir = os.path.join(main_dir, "MyAI_Training_Docs") train_dir = ".//MyAI_Training_Docs//" train_path = os.path.join(train_dir, tool) master_dir = os.path.join(train_path, "Master") persistpath = 'Indexes//' + tool + '//' if tool == 'ai': doc_path = "ai" elif tool == 'gn': doc_path = "gn" newdocspath = train_path + "//Docs" masterpath = train_path + "//Master" print(tool, choice) print("PP: ", persistpath) print("nd: ", newdocspath) print("mp: ", masterpath) #print("bp: ", basepath) basepath = "" CreateUpdate_Index(basepath, masterpath, newdocspath, persistpath, choice, tool) print("Askbuild gn complete") elif choice == 'ask': print("Askbuild ask reached") persistpath = 'Indexes//' newdocspath = 'Docs' masterpath = 'Master' main_dir = "." basepath = os.path.join(main_dir, tool) indexpath = main_dir + '//Indexes//' + tool + '//' AskQuestion(indexpath, persistpath) print("Ask build ask complete") else: pass def AskQuestion(topic: str, action: str, question: str): from llama_index import load_index_from_storage print(topic) print("Ask question reached") indexpath = './/Indexes//' + topic + '//' print('indexpath= ', indexpath) print(os.listdir(indexpath)) storage_context = StorageContext.from_defaults(persist_dir=indexpath) new_index = load_index_from_storage(storage_context) new_query_engine = new_index.as_query_engine() while True: if question.lower() == "exit": break response = new_query_engine.query(question) print(response) print("AskQuestion complete") return response #AskBuild('gn', 'build')
[ "llama_index.LLMPredictor", "llama_index.ServiceContext.from_defaults", "llama_index.StorageContext.from_defaults", "llama_index.VectorStoreIndex", "llama_index.SimpleDirectoryReader", "llama_index.load_index_from_storage", "llama_index.node_parser.SimpleNodeParser.from_defaults", "llama_index.PromptHelper" ]
[((314, 325), 'constants.Get_API', 'c.Get_API', ([], {}), '()\n', (323, 325), True, 'import constants as c\n'), ((335, 343), 'openai.OpenAI', 'OpenAI', ([], {}), '()\n', (341, 343), False, 'from openai import OpenAI\n'), ((1027, 1045), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', ([], {}), '()\n', (1043, 1045), False, 'from llama_index import StorageContext, VectorStoreIndex, Document\n'), ((1321, 1354), 'os.path.join', 'os.path.join', (['main_dir', '"""Indexes"""'], {}), "(main_dir, 'Indexes')\n", (1333, 1354), False, 'import os\n'), ((1374, 1405), 'os.path.join', 'os.path.join', (['indexes_dir', 'tool'], {}), '(indexes_dir, tool)\n', (1386, 1405), False, 'import os\n'), ((1461, 1479), 'pathlib.Path', 'Path', (['chkindexpath'], {}), '(chkindexpath)\n', (1465, 1479), False, 'from pathlib import Path\n'), ((2370, 2402), 'llama_index.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {}), '()\n', (2400, 2402), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((2502, 2531), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', ([], {'nodes': 'nodes'}), '(nodes=nodes)\n', (2518, 2531), False, 'from llama_index import StorageContext, VectorStoreIndex, Document\n'), ((2624, 2657), 'os.path.join', 'os.path.join', (['basepath', 'indexpath'], {}), '(basepath, indexpath)\n', (2636, 2657), False, 'import os\n'), ((4102, 4113), 'constants.Get_API', 'c.Get_API', ([], {}), '()\n', (4111, 4113), True, 'import constants as c\n'), ((6806, 6857), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'indexpath'}), '(persist_dir=indexpath)\n', (6834, 6857), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((6874, 6914), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (6897, 6914), False, 'from llama_index import load_index_from_storage\n'), ((4207, 4258), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'indexpath'}), '(persist_dir=indexpath)\n', (4235, 4258), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((4275, 4315), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (4298, 4315), False, 'from llama_index import load_index_from_storage\n'), ((4339, 4370), 'os.path.join', 'os.path.join', (['basepath', 'newdocs'], {}), '(basepath, newdocs)\n', (4351, 4370), False, 'import os\n'), ((4394, 4418), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'openai'}), '(llm=openai)\n', (4406, 4418), False, 'from llama_index import PromptHelper, LLMPredictor, ServiceContext\n'), ((4564, 4663), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_outputs', 'max_chunk_overlap'], {'chunk_size_limit': 'chunk_size_limit'}), '(max_input_size, num_outputs, max_chunk_overlap,\n chunk_size_limit=chunk_size_limit)\n', (4576, 4663), False, 'from llama_index import PromptHelper, LLMPredictor, ServiceContext\n'), ((4687, 4778), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (4715, 4778), False, 'from llama_index import PromptHelper, LLMPredictor, ServiceContext\n'), ((4792, 4827), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['new_docs_dir'], {}), '(new_docs_dir)\n', (4813, 4827), False, 'from 
llama_index import SimpleDirectoryReader\n'), ((4905, 4938), 'os.path.join', 'os.path.join', (['basepath', 'indexpath'], {}), '(basepath, indexpath)\n', (4917, 4938), False, 'import os\n'), ((5468, 5497), 'os.path.join', 'os.path.join', (['train_dir', 'tool'], {}), '(train_dir, tool)\n', (5480, 5497), False, 'import os\n'), ((5519, 5553), 'os.path.join', 'os.path.join', (['train_path', '"""Master"""'], {}), "(train_path, 'Master')\n", (5531, 5553), False, 'import os\n'), ((6761, 6782), 'os.listdir', 'os.listdir', (['indexpath'], {}), '(indexpath)\n', (6771, 6782), False, 'import os\n'), ((1531, 1552), 'os.listdir', 'os.listdir', (['index_dir'], {}), '(index_dir)\n', (1541, 1552), False, 'import os\n'), ((2270, 2310), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': 'docpath'}), '(input_dir=docpath)\n', (2291, 2310), False, 'from llama_index import SimpleDirectoryReader\n'), ((4133, 4152), 'os.listdir', 'os.listdir', (['newdocs'], {}), '(newdocs)\n', (4143, 4152), False, 'import os\n'), ((6312, 6340), 'os.path.join', 'os.path.join', (['main_dir', 'tool'], {}), '(main_dir, tool)\n', (6324, 6340), False, 'import os\n')]
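The offsets above point at the node-parser flavour of index construction (SimpleNodeParser.from_defaults feeding VectorStoreIndex) together with the persist/reload pair StorageContext.from_defaults and load_index_from_storage. As a reference, here is a minimal, self-contained sketch of that round trip against the legacy pre-0.10 llama_index namespace used throughout this file; the directory paths and chunk sizes are placeholders, and an OPENAI_API_KEY is assumed to be configured for the default embedding model.

from llama_index import (
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    load_index_from_storage,
)
from llama_index.node_parser import SimpleNodeParser

# Parse raw documents into nodes explicitly, mirroring the extracted calls above.
documents = SimpleDirectoryReader(input_dir="./docs").load_data()
parser = SimpleNodeParser.from_defaults(chunk_size=512, chunk_overlap=64)
nodes = parser.get_nodes_from_documents(documents)

# Build the index from the nodes and persist it to disk.
index = VectorStoreIndex(nodes=nodes)
index.storage_context.persist(persist_dir="./index_storage")

# Later: rebuild the storage context from the same directory and reload the index.
storage_context = StorageContext.from_defaults(persist_dir="./index_storage")
reloaded_index = load_index_from_storage(storage_context)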
import logging from dataclasses import dataclass from typing import Any, List, Optional, cast import llama_index from llama_index.bridge.pydantic import BaseModel from llama_index.callbacks.base import CallbackManager from llama_index.core.embeddings.base import BaseEmbedding from llama_index.indices.prompt_helper import PromptHelper from llama_index.llm_predictor import LLMPredictor from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata from llama_index.llms.llm import LLM from llama_index.llms.utils import LLMType, resolve_llm from llama_index.logger import LlamaLogger from llama_index.node_parser.interface import NodeParser, TextSplitter from llama_index.node_parser.text.sentence import ( DEFAULT_CHUNK_SIZE, SENTENCE_CHUNK_OVERLAP, SentenceSplitter, ) from llama_index.prompts.base import BasePromptTemplate from llama_index.schema import TransformComponent from llama_index.types import PydanticProgramMode logger = logging.getLogger(__name__) def _get_default_node_parser( chunk_size: int = DEFAULT_CHUNK_SIZE, chunk_overlap: int = SENTENCE_CHUNK_OVERLAP, callback_manager: Optional[CallbackManager] = None, ) -> NodeParser: """Get default node parser.""" return SentenceSplitter( chunk_size=chunk_size, chunk_overlap=chunk_overlap, callback_manager=callback_manager or CallbackManager(), ) def _get_default_prompt_helper( llm_metadata: LLMMetadata, context_window: Optional[int] = None, num_output: Optional[int] = None, ) -> PromptHelper: """Get default prompt helper.""" if context_window is not None: llm_metadata.context_window = context_window if num_output is not None: llm_metadata.num_output = num_output return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata) class ServiceContextData(BaseModel): llm: dict llm_predictor: dict prompt_helper: dict embed_model: dict transformations: List[dict] @dataclass class ServiceContext: """Service Context container. The service context container is a utility container for LlamaIndex index and query classes. It contains the following: - llm_predictor: BaseLLMPredictor - prompt_helper: PromptHelper - embed_model: BaseEmbedding - node_parser: NodeParser - llama_logger: LlamaLogger (deprecated) - callback_manager: CallbackManager """ llm_predictor: BaseLLMPredictor prompt_helper: PromptHelper embed_model: BaseEmbedding transformations: List[TransformComponent] llama_logger: LlamaLogger callback_manager: CallbackManager @classmethod def from_defaults( cls, llm_predictor: Optional[BaseLLMPredictor] = None, llm: Optional[LLMType] = "default", prompt_helper: Optional[PromptHelper] = None, embed_model: Optional[Any] = "default", node_parser: Optional[NodeParser] = None, text_splitter: Optional[TextSplitter] = None, transformations: Optional[List[TransformComponent]] = None, llama_logger: Optional[LlamaLogger] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, query_wrapper_prompt: Optional[BasePromptTemplate] = None, # pydantic program mode (used if output_cls is specified) pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, # node parser kwargs chunk_size: Optional[int] = None, chunk_overlap: Optional[int] = None, # prompt helper kwargs context_window: Optional[int] = None, num_output: Optional[int] = None, # deprecated kwargs chunk_size_limit: Optional[int] = None, ) -> "ServiceContext": """Create a ServiceContext from defaults. If an argument is specified, then use the argument value provided for that parameter. 
If an argument is not specified, then use the default value. You can change the base defaults by setting llama_index.global_service_context to a ServiceContext object with your desired settings. Args: llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor prompt_helper (Optional[PromptHelper]): PromptHelper embed_model (Optional[BaseEmbedding]): BaseEmbedding or "local" (use local model) node_parser (Optional[NodeParser]): NodeParser llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated) chunk_size (Optional[int]): chunk_size callback_manager (Optional[CallbackManager]): CallbackManager system_prompt (Optional[str]): System-wide prompt to be prepended to all input prompts, used to guide system "decision making" query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap passed-in input queries. Deprecated Args: chunk_size_limit (Optional[int]): renamed to chunk_size """ from llama_index.embeddings.utils import EmbedType, resolve_embed_model embed_model = cast(EmbedType, embed_model) if chunk_size_limit is not None and chunk_size is None: logger.warning( "chunk_size_limit is deprecated, please specify chunk_size instead" ) chunk_size = chunk_size_limit if llama_index.global_service_context is not None: return cls.from_service_context( llama_index.global_service_context, llm=llm, llm_predictor=llm_predictor, prompt_helper=prompt_helper, embed_model=embed_model, node_parser=node_parser, text_splitter=text_splitter, llama_logger=llama_logger, callback_manager=callback_manager, context_window=context_window, chunk_size=chunk_size, chunk_size_limit=chunk_size_limit, chunk_overlap=chunk_overlap, num_output=num_output, system_prompt=system_prompt, query_wrapper_prompt=query_wrapper_prompt, transformations=transformations, ) callback_manager = callback_manager or CallbackManager([]) if llm != "default": if llm_predictor is not None: raise ValueError("Cannot specify both llm and llm_predictor") llm = resolve_llm(llm) llm.system_prompt = llm.system_prompt or system_prompt llm.query_wrapper_prompt = llm.query_wrapper_prompt or query_wrapper_prompt llm.pydantic_program_mode = ( llm.pydantic_program_mode or pydantic_program_mode ) if llm_predictor is not None: print("LLMPredictor is deprecated, please use LLM instead.") llm_predictor = llm_predictor or LLMPredictor( llm=llm, pydantic_program_mode=pydantic_program_mode ) if isinstance(llm_predictor, LLMPredictor): llm_predictor.llm.callback_manager = callback_manager if system_prompt: llm_predictor.system_prompt = system_prompt if query_wrapper_prompt: llm_predictor.query_wrapper_prompt = query_wrapper_prompt # NOTE: the embed_model isn't used in all indices # NOTE: embed model should be a transformation, but the way the service # context works, we can't put in there yet. 
embed_model = resolve_embed_model(embed_model) embed_model.callback_manager = callback_manager prompt_helper = prompt_helper or _get_default_prompt_helper( llm_metadata=llm_predictor.metadata, context_window=context_window, num_output=num_output, ) if text_splitter is not None and node_parser is not None: raise ValueError("Cannot specify both text_splitter and node_parser") node_parser = ( text_splitter # text splitter extends node parser or node_parser or _get_default_node_parser( chunk_size=chunk_size or DEFAULT_CHUNK_SIZE, chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP, callback_manager=callback_manager, ) ) transformations = transformations or [node_parser] llama_logger = llama_logger or LlamaLogger() return cls( llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper, transformations=transformations, llama_logger=llama_logger, # deprecated callback_manager=callback_manager, ) @classmethod def from_service_context( cls, service_context: "ServiceContext", llm_predictor: Optional[BaseLLMPredictor] = None, llm: Optional[LLMType] = "default", prompt_helper: Optional[PromptHelper] = None, embed_model: Optional[Any] = "default", node_parser: Optional[NodeParser] = None, text_splitter: Optional[TextSplitter] = None, transformations: Optional[List[TransformComponent]] = None, llama_logger: Optional[LlamaLogger] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, query_wrapper_prompt: Optional[BasePromptTemplate] = None, # node parser kwargs chunk_size: Optional[int] = None, chunk_overlap: Optional[int] = None, # prompt helper kwargs context_window: Optional[int] = None, num_output: Optional[int] = None, # deprecated kwargs chunk_size_limit: Optional[int] = None, ) -> "ServiceContext": """Instantiate a new service context using a previous as the defaults.""" from llama_index.embeddings.utils import EmbedType, resolve_embed_model embed_model = cast(EmbedType, embed_model) if chunk_size_limit is not None and chunk_size is None: logger.warning( "chunk_size_limit is deprecated, please specify chunk_size", DeprecationWarning, ) chunk_size = chunk_size_limit callback_manager = callback_manager or service_context.callback_manager if llm != "default": if llm_predictor is not None: raise ValueError("Cannot specify both llm and llm_predictor") llm = resolve_llm(llm) llm_predictor = LLMPredictor(llm=llm) llm_predictor = llm_predictor or service_context.llm_predictor if isinstance(llm_predictor, LLMPredictor): llm_predictor.llm.callback_manager = callback_manager if system_prompt: llm_predictor.system_prompt = system_prompt if query_wrapper_prompt: llm_predictor.query_wrapper_prompt = query_wrapper_prompt # NOTE: the embed_model isn't used in all indices # default to using the embed model passed from the service context if embed_model == "default": embed_model = service_context.embed_model embed_model = resolve_embed_model(embed_model) embed_model.callback_manager = callback_manager prompt_helper = prompt_helper or service_context.prompt_helper if context_window is not None or num_output is not None: prompt_helper = _get_default_prompt_helper( llm_metadata=llm_predictor.metadata, context_window=context_window, num_output=num_output, ) transformations = transformations or [] node_parser_found = False for transform in service_context.transformations: if isinstance(transform, NodeParser): node_parser_found = True node_parser = transform break if text_splitter is not None and node_parser is not None: raise ValueError("Cannot specify 
both text_splitter and node_parser") if not node_parser_found: node_parser = ( text_splitter # text splitter extends node parser or node_parser or _get_default_node_parser( chunk_size=chunk_size or DEFAULT_CHUNK_SIZE, chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP, callback_manager=callback_manager, ) ) transformations = transformations or service_context.transformations llama_logger = llama_logger or service_context.llama_logger return cls( llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper, transformations=transformations, llama_logger=llama_logger, # deprecated callback_manager=callback_manager, ) @property def llm(self) -> LLM: return self.llm_predictor.llm @property def node_parser(self) -> NodeParser: """Get the node parser.""" for transform in self.transformations: if isinstance(transform, NodeParser): return transform raise ValueError("No node parser found.") def to_dict(self) -> dict: """Convert service context to dict.""" llm_dict = self.llm_predictor.llm.to_dict() llm_predictor_dict = self.llm_predictor.to_dict() embed_model_dict = self.embed_model.to_dict() prompt_helper_dict = self.prompt_helper.to_dict() tranform_list_dict = [x.to_dict() for x in self.transformations] return ServiceContextData( llm=llm_dict, llm_predictor=llm_predictor_dict, prompt_helper=prompt_helper_dict, embed_model=embed_model_dict, transformations=tranform_list_dict, ).dict() @classmethod def from_dict(cls, data: dict) -> "ServiceContext": from llama_index.embeddings.loading import load_embed_model from llama_index.extractors.loading import load_extractor from llama_index.llm_predictor.loading import load_predictor from llama_index.node_parser.loading import load_parser service_context_data = ServiceContextData.parse_obj(data) llm_predictor = load_predictor(service_context_data.llm_predictor) embed_model = load_embed_model(service_context_data.embed_model) prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper) transformations: List[TransformComponent] = [] for transform in service_context_data.transformations: try: transformations.append(load_parser(transform)) except ValueError: transformations.append(load_extractor(transform)) return cls.from_defaults( llm_predictor=llm_predictor, prompt_helper=prompt_helper, embed_model=embed_model, transformations=transformations, ) def set_global_service_context(service_context: Optional[ServiceContext]) -> None: """Helper function to set the global service context.""" llama_index.global_service_context = service_context
[ "llama_index.llm_predictor.loading.load_predictor", "llama_index.embeddings.loading.load_embed_model", "llama_index.logger.LlamaLogger", "llama_index.llms.utils.resolve_llm", "llama_index.node_parser.loading.load_parser", "llama_index.callbacks.base.CallbackManager", "llama_index.indices.prompt_helper.PromptHelper.from_dict", "llama_index.extractors.loading.load_extractor", "llama_index.llm_predictor.LLMPredictor", "llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata", "llama_index.embeddings.utils.resolve_embed_model" ]
[((962, 989), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (979, 989), False, 'import logging\n'), ((1764, 1821), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1794, 1821), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((5128, 5156), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (5132, 5156), False, 'from typing import Any, List, Optional, cast\n'), ((7575, 7607), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (7594, 7607), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((10019, 10047), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (10023, 10047), False, 'from typing import Any, List, Optional, cast\n'), ((11263, 11295), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (11282, 11295), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((14437, 14487), 'llama_index.llm_predictor.loading.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {}), '(service_context_data.llm_predictor)\n', (14451, 14487), False, 'from llama_index.llm_predictor.loading import load_predictor\n'), ((14511, 14561), 'llama_index.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (14527, 14561), False, 'from llama_index.embeddings.loading import load_embed_model\n'), ((14587, 14645), 'llama_index.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (14609, 14645), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((6319, 6338), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6334, 6338), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((6506, 6522), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (6517, 6522), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((6954, 7020), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm', 'pydantic_program_mode': 'pydantic_program_mode'}), '(llm=llm, pydantic_program_mode=pydantic_program_mode)\n', (6966, 7020), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((8483, 8496), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (8494, 8496), False, 'from llama_index.logger import LlamaLogger\n'), ((10558, 10574), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (10569, 10574), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((10603, 10624), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (10615, 10624), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((1363, 1380), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (1378, 1380), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((14821, 14843), 'llama_index.node_parser.loading.load_parser', 'load_parser', (['transform'], {}), '(transform)\n', (14832, 14843), False, 'from 
llama_index.node_parser.loading import load_parser\n'), ((14915, 14940), 'llama_index.extractors.loading.load_extractor', 'load_extractor', (['transform'], {}), '(transform)\n', (14929, 14940), False, 'from llama_index.extractors.loading import load_extractor\n')]
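Since the module above is what ServiceContext.from_defaults and set_global_service_context resolve to, a short usage sketch may be helpful. It only exercises arguments defined in the code above; the model name, chunk settings and system prompt are illustrative rather than prescriptive, and an OpenAI key is assumed.

from llama_index import ServiceContext, set_global_service_context
from llama_index.llms import OpenAI

# Either `llm` or `llm_predictor` may be supplied, not both (from_defaults raises otherwise).
service_context = ServiceContext.from_defaults(
    llm=OpenAI(model="gpt-3.5-turbo"),
    chunk_size=512,    # forwarded to the default SentenceSplitter node parser
    chunk_overlap=64,
    system_prompt="You answer questions about the indexed documents.",
)

# Make these settings the process-wide default picked up by later from_defaults() calls.
set_global_service_context(service_context)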
import os import hashlib from threading import Thread from pathlib import Path #import llama_index from openai import OpenAI import constants as c c.Get_API() client = OpenAI() newdocspath = "" masterpath = "" basepath = "" persistpath = "" # test class Document: __slots__ = ['text', 'doc_id', 'id_', 'hash'] def __init__(self, text: str, doc_id: str): self.text = text self.doc_id = doc_id self.id_ = doc_id self.hash = self.generate_hash(text) def generate_hash(self, text: str) -> str: return hashlib.sha256(text.encode()).hexdigest() def get_metadata_str(self, mode=None) -> str: return f"{self.doc_id}-{self.hash}" def get_content(self, metadata_mode=None) -> str: return self.text def index_document(doc: Document): print("index_document reached") index = VectorStoreIndex() index.add_document(doc) print("index doscument complete") def CreateUpdate_Index(basepath, masterdocs, newdocs, indexpath, action, tool ): print('Create/Update function running') # Ask questions until user exits while True: # Check if index path directory is empty chkindexpath = "Z:\\MyChatBot_v1.0\\"+ tool + "\\index\\" print(chkindexpath) index_dir = Path(chkindexpath) is_empty = len(os.listdir(index_dir)) == 0 if is_empty: print('Running creating index function') Create_Index(basepath, masterdocs, newdocs, indexpath, tool ) else: print('Running updating index function') Update_Index(basepath, masterdocs, newdocs, indexpath) def Create_Index(basepath: str, masterdocs: str, newdocs: str, indexpath: str, tool): print('Creating index') from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document # Specify the input_dir path docpath = masterdocs documents = SimpleDirectoryReader(input_dir=docpath).load_data() # Create an index from the documents index = VectorStoreIndex.from_documents(documents) # Persist index to disk saveindexpath = basepath + indexpath index.storage_context.persist(saveindexpath) print('Index created and saved') docs_dir = os.path.join("Z:\\MyAI_Training_Docs\\", tool, "_Training_Docs\\docs") doc_paths = Path(docs_dir).glob("*") num_nodes = 8 nodes = [BaseNode() for _ in range(num_nodes)] index = VectorStoreIndex(nodes=nodes) threads = [] for path in doc_paths: with open(path) as f: text = f.read() doc = Document(text, path.name) thread = Thread(target=index_document, args=(doc,)) threads.append(thread) thread.start() for thread in threads: thread.join() storage_context = StorageContext(indexdir=indexpath) storage_context.persist(index) print("Create index complete") def Update_Index(basepath: str, masterdocs: str, newdocs: str, indexpath: str): print("update index reached") import os from llama_index import load_index_from_storage storage_context = StorageContext.from_defaults(indexpath) index = load_index_from_storage(storage_context) new_docs_dir = os.path.join(basepath, newdocs) for filename in os.listdir(new_docs_dir): path = os.path.join(new_docs_dir, filename) with open(path) as f: text = f.read() doc = Document(text, filename) index.add_document(doc) storage_context.persist(index) print("Update index completed") def AskBuild(tool, choice): print("AskBuild reached : ", tool, choice) if choice == 'build': print("Askbuild build reached") basepath = 'Z:\\MyAI_Training_Docs\\' persistpath = 'Index\\Index\\' if tool == 'ai': doc_path = "AI" elif tool == 'gn': doc_path = "GN" newdocspath = basepath + doc_path + "_Training_Docs\\Docs" masterpath = basepath + doc_path + "_Training_Docs\\Master" print(tool, choice) print("PP: ", persistpath) print("nd: 
", newdocspath) print("mp: ", masterpath) print("bp: ", basepath) CreateUpdate_Index(basepath, masterpath, newdocspath, persistpath, choice, tool) print("Askbuild GN complete") elif choice == 'ask': print("Askbuild ask reached") persistpath = 'Index\\Index\\' newdocspath = 'Docs' masterpath = 'Master' basepath = 'Z:\\MyChatBot_v1.0\\' + tool + '\\' AskQuestion(basepath, persistpath) print("Ask build ask complete") else: pass def AskQuestion(indexpath: str): print("Ask question reached") storage_context = StorageContext.from_defaults(indexpath) index = load_index_from_storage(storage_context) while True: question = input("Enter question: ") if question.lower() == "exit": break response = index.query(question) print(response) print("AskQuestion complete")
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.StorageContext", "llama_index.StorageContext.from_defaults", "llama_index.VectorStoreIndex", "llama_index.SimpleDirectoryReader", "llama_index.load_index_from_storage", "llama_index.Document" ]
[((147, 158), 'constants.Get_API', 'c.Get_API', ([], {}), '()\n', (156, 158), True, 'import constants as c\n'), ((168, 176), 'openai.OpenAI', 'OpenAI', ([], {}), '()\n', (174, 176), False, 'from openai import OpenAI\n'), ((854, 872), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', ([], {}), '()\n', (870, 872), False, 'from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document\n'), ((2052, 2094), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2083, 2094), False, 'from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document\n'), ((2268, 2338), 'os.path.join', 'os.path.join', (['"""Z:\\\\MyAI_Training_Docs\\\\"""', 'tool', '"""_Training_Docs\\\\docs"""'], {}), "('Z:\\\\MyAI_Training_Docs\\\\', tool, '_Training_Docs\\\\docs')\n", (2280, 2338), False, 'import os\n'), ((2462, 2491), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', ([], {'nodes': 'nodes'}), '(nodes=nodes)\n', (2478, 2491), False, 'from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document\n'), ((2822, 2856), 'llama_index.StorageContext', 'StorageContext', ([], {'indexdir': 'indexpath'}), '(indexdir=indexpath)\n', (2836, 2856), False, 'from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document\n'), ((3131, 3170), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', (['indexpath'], {}), '(indexpath)\n', (3159, 3170), False, 'from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document\n'), ((3183, 3223), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (3206, 3223), False, 'from llama_index import load_index_from_storage\n'), ((3244, 3275), 'os.path.join', 'os.path.join', (['basepath', 'newdocs'], {}), '(basepath, newdocs)\n', (3256, 3275), False, 'import os\n'), ((3296, 3320), 'os.listdir', 'os.listdir', (['new_docs_dir'], {}), '(new_docs_dir)\n', (3306, 3320), False, 'import os\n'), ((4751, 4790), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', (['indexpath'], {}), '(indexpath)\n', (4779, 4790), False, 'from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document\n'), ((4803, 4843), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (4826, 4843), False, 'from llama_index import load_index_from_storage\n'), ((1282, 1300), 'pathlib.Path', 'Path', (['chkindexpath'], {}), '(chkindexpath)\n', (1286, 1300), False, 'from pathlib import Path\n'), ((2609, 2634), 'llama_index.Document', 'Document', (['text', 'path.name'], {}), '(text, path.name)\n', (2617, 2634), False, 'from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document\n'), ((2652, 2694), 'threading.Thread', 'Thread', ([], {'target': 'index_document', 'args': '(doc,)'}), '(target=index_document, args=(doc,))\n', (2658, 2694), False, 'from threading import Thread\n'), ((3337, 3373), 'os.path.join', 'os.path.join', (['new_docs_dir', 'filename'], {}), '(new_docs_dir, filename)\n', (3349, 3373), False, 'import os\n'), ((3446, 3470), 'llama_index.Document', 'Document', (['text', 'filename'], {}), '(text, filename)\n', (3454, 3470), False, 'from 
llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document\n'), ((1945, 1985), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': 'docpath'}), '(input_dir=docpath)\n', (1966, 1985), False, 'from llama_index import StorageContext, VectorStoreIndex, SimpleDirectoryReader, load_index_from_storage, Document\n'), ((2355, 2369), 'pathlib.Path', 'Path', (['docs_dir'], {}), '(docs_dir)\n', (2359, 2369), False, 'from pathlib import Path\n'), ((1324, 1345), 'os.listdir', 'os.listdir', (['index_dir'], {}), '(index_dir)\n', (1334, 1345), False, 'import os\n')]
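The sample above relies on a few methods that do not appear anywhere else in this file (for example index.add_document(...) and StorageContext(indexdir=...)), so it will not run as written against the legacy llama_index API. Below is a hedged sketch of the same build / update / query flow, restricted to calls used elsewhere in this file plus index.insert, with the hard-coded Windows paths replaced by placeholder arguments.

from llama_index import (
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    load_index_from_storage,
)


def build_index(docs_dir: str, persist_dir: str) -> VectorStoreIndex:
    """Create a fresh index from a directory of documents and persist it."""
    documents = SimpleDirectoryReader(input_dir=docs_dir).load_data()
    index = VectorStoreIndex.from_documents(documents)
    index.storage_context.persist(persist_dir=persist_dir)
    return index


def update_index(persist_dir: str, new_docs_dir: str):
    """Reload a persisted index and insert any new documents into it."""
    storage_context = StorageContext.from_defaults(persist_dir=persist_dir)
    index = load_index_from_storage(storage_context)
    for doc in SimpleDirectoryReader(input_dir=new_docs_dir).load_data():
        index.insert(doc)
    index.storage_context.persist(persist_dir=persist_dir)
    return index


def ask(index, question: str) -> str:
    """Query through a query engine instead of calling index.query() directly."""
    return str(index.as_query_engine().query(question))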
import dataclasses import logging from dataclasses import dataclass from typing import Optional from llama_index.bridge.langchain import BaseLanguageModel import llama_index from llama_index.callbacks.base import CallbackManager from llama_index.embeddings.base import BaseEmbedding from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.indices.prompt_helper import PromptHelper from llama_index.langchain_helpers.chain_wrapper import LLMPredictor from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata from llama_index.logger import LlamaLogger from llama_index.node_parser.interface import NodeParser from llama_index.node_parser.simple import SimpleNodeParser logger = logging.getLogger(__name__) def _get_default_node_parser( chunk_size: Optional[int] = None, chunk_overlap: Optional[int] = None, callback_manager: Optional[CallbackManager] = None, ) -> NodeParser: """Get default node parser.""" return SimpleNodeParser.from_defaults( chunk_size=chunk_size, chunk_overlap=chunk_overlap, callback_manager=callback_manager, ) def _get_default_prompt_helper( llm_metadata: LLMMetadata, context_window: Optional[int] = None, num_output: Optional[int] = None, ) -> PromptHelper: """Get default prompt helper.""" if context_window is not None: llm_metadata = dataclasses.replace(llm_metadata, context_window=context_window) if num_output is not None: llm_metadata = dataclasses.replace(llm_metadata, num_output=num_output) return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata) @dataclass class ServiceContext: """Service Context container. The service context container is a utility container for LlamaIndex index and query classes. It contains the following: - llm_predictor: BaseLLMPredictor - prompt_helper: PromptHelper - embed_model: BaseEmbedding - node_parser: NodeParser - llama_logger: LlamaLogger (deprecated) - callback_manager: CallbackManager """ llm_predictor: BaseLLMPredictor prompt_helper: PromptHelper embed_model: BaseEmbedding node_parser: NodeParser llama_logger: LlamaLogger callback_manager: CallbackManager @classmethod def from_defaults( cls, llm_predictor: Optional[BaseLLMPredictor] = None, llm: Optional[BaseLanguageModel] = None, prompt_helper: Optional[PromptHelper] = None, embed_model: Optional[BaseEmbedding] = None, node_parser: Optional[NodeParser] = None, llama_logger: Optional[LlamaLogger] = None, callback_manager: Optional[CallbackManager] = None, # node parser kwargs chunk_size: Optional[int] = None, chunk_overlap: Optional[int] = None, # prompt helper kwargs context_window: Optional[int] = None, num_output: Optional[int] = None, # deprecated kwargs chunk_size_limit: Optional[int] = None, ) -> "ServiceContext": """Create a ServiceContext from defaults. If an argument is specified, then use the argument value provided for that parameter. If an argument is not specified, then use the default value. You can change the base defaults by setting llama_index.global_service_context to a ServiceContext object with your desired settings. 
Args: llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor prompt_helper (Optional[PromptHelper]): PromptHelper embed_model (Optional[BaseEmbedding]): BaseEmbedding node_parser (Optional[NodeParser]): NodeParser llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated) chunk_size (Optional[int]): chunk_size callback_manager (Optional[CallbackManager]): CallbackManager Deprecated Args: chunk_size_limit (Optional[int]): renamed to chunk_size """ if chunk_size_limit is not None and chunk_size is None: logger.warning( "chunk_size_limit is deprecated, please specify chunk_size instead" ) chunk_size = chunk_size_limit if llama_index.global_service_context is not None: return cls.from_service_context( llama_index.global_service_context, llm_predictor=llm_predictor, prompt_helper=prompt_helper, embed_model=embed_model, node_parser=node_parser, llama_logger=llama_logger, callback_manager=callback_manager, chunk_size=chunk_size, chunk_size_limit=chunk_size_limit, ) callback_manager = callback_manager or CallbackManager([]) if llm is not None: if llm_predictor is not None: raise ValueError("Cannot specify both llm and llm_predictor") llm_predictor = LLMPredictor(llm=llm) llm_predictor = llm_predictor or LLMPredictor() llm_predictor.callback_manager = callback_manager # NOTE: the embed_model isn't used in all indices embed_model = embed_model or OpenAIEmbedding() embed_model.callback_manager = callback_manager prompt_helper = prompt_helper or _get_default_prompt_helper( llm_metadata=llm_predictor.get_llm_metadata(), context_window=context_window, num_output=num_output, ) node_parser = node_parser or _get_default_node_parser( chunk_size=chunk_size, chunk_overlap=chunk_overlap, callback_manager=callback_manager, ) llama_logger = llama_logger or LlamaLogger() return cls( llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper, node_parser=node_parser, llama_logger=llama_logger, # deprecated callback_manager=callback_manager, ) @classmethod def from_service_context( cls, service_context: "ServiceContext", llm_predictor: Optional[BaseLLMPredictor] = None, llm: Optional[BaseLanguageModel] = None, prompt_helper: Optional[PromptHelper] = None, embed_model: Optional[BaseEmbedding] = None, node_parser: Optional[NodeParser] = None, llama_logger: Optional[LlamaLogger] = None, callback_manager: Optional[CallbackManager] = None, # node parser kwargs chunk_size: Optional[int] = None, chunk_overlap: Optional[int] = None, # prompt helper kwargs context_window: Optional[int] = None, num_output: Optional[int] = None, # deprecated kwargs chunk_size_limit: Optional[int] = None, ) -> "ServiceContext": """Instantiate a new service context using a previous as the defaults.""" if chunk_size_limit is not None and chunk_size is None: logger.warning( "chunk_size_limit is deprecated, please specify chunk_size", DeprecationWarning, ) chunk_size = chunk_size_limit callback_manager = callback_manager or service_context.callback_manager if llm is not None: if llm_predictor is not None: raise ValueError("Cannot specify both llm and llm_predictor") llm_predictor = LLMPredictor(llm=llm) llm_predictor = llm_predictor or service_context.llm_predictor llm_predictor.callback_manager = callback_manager # NOTE: the embed_model isn't used in all indices embed_model = embed_model or service_context.embed_model embed_model.callback_manager = callback_manager prompt_helper = prompt_helper or _get_default_prompt_helper( llm_metadata=llm_predictor.get_llm_metadata(), context_window=context_window, 
num_output=num_output, ) node_parser = node_parser or service_context.node_parser if chunk_size is not None or chunk_overlap is not None: node_parser = _get_default_node_parser( chunk_size=chunk_size, chunk_overlap=chunk_overlap, callback_manager=callback_manager, ) llama_logger = llama_logger or service_context.llama_logger return cls( llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper, node_parser=node_parser, llama_logger=llama_logger, # deprecated callback_manager=callback_manager, ) def set_global_service_context(service_context: Optional[ServiceContext]) -> None: """Helper function to set the global service context.""" llama_index.global_service_context = service_context
[ "llama_index.langchain_helpers.chain_wrapper.LLMPredictor", "llama_index.callbacks.base.CallbackManager", "llama_index.node_parser.simple.SimpleNodeParser.from_defaults", "llama_index.logger.LlamaLogger", "llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata", "llama_index.embeddings.openai.OpenAIEmbedding" ]
[((714, 741), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (731, 741), False, 'import logging\n'), ((972, 1094), 'llama_index.node_parser.simple.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'callback_manager': 'callback_manager'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap, callback_manager=callback_manager)\n', (1002, 1094), False, 'from llama_index.node_parser.simple import SimpleNodeParser\n'), ((1567, 1624), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1597, 1624), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((1380, 1444), 'dataclasses.replace', 'dataclasses.replace', (['llm_metadata'], {'context_window': 'context_window'}), '(llm_metadata, context_window=context_window)\n', (1399, 1444), False, 'import dataclasses\n'), ((1499, 1555), 'dataclasses.replace', 'dataclasses.replace', (['llm_metadata'], {'num_output': 'num_output'}), '(llm_metadata, num_output=num_output)\n', (1518, 1555), False, 'import dataclasses\n'), ((4767, 4786), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (4782, 4786), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((4963, 4984), 'llama_index.langchain_helpers.chain_wrapper.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (4975, 4984), False, 'from llama_index.langchain_helpers.chain_wrapper import LLMPredictor\n'), ((5026, 5040), 'llama_index.langchain_helpers.chain_wrapper.LLMPredictor', 'LLMPredictor', ([], {}), '()\n', (5038, 5040), False, 'from llama_index.langchain_helpers.chain_wrapper import LLMPredictor\n'), ((5195, 5212), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (5210, 5212), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((5723, 5736), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (5734, 5736), False, 'from llama_index.logger import LlamaLogger\n'), ((7442, 7463), 'llama_index.langchain_helpers.chain_wrapper.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (7454, 7463), False, 'from llama_index.langchain_helpers.chain_wrapper import LLMPredictor\n')]
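This older ServiceContext exposes the same from_defaults / from_service_context pair, and the latter is easy to overlook: it lets a single index override a couple of settings while inheriting everything else. A small sketch using only arguments defined above; the chunk sizes are arbitrary and an OpenAI key is assumed for the default predictor and embeddings.

from llama_index import ServiceContext

# Shared defaults for most indices in the application.
base_context = ServiceContext.from_defaults(chunk_size=1024)

# A derived context that only overrides the node-parser settings;
# the LLM predictor, embedding model and callback manager are inherited.
fine_grained_context = ServiceContext.from_service_context(
    base_context,
    chunk_size=256,
    chunk_overlap=32,
)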
import llama_index.core llama_index.core.set_global_handler("simple") from llama_index.core import SimpleDirectoryReader from llama_index.core.node_parser import SimpleFileNodeParser from llama_index.core import VectorStoreIndex #Loading documents = SimpleDirectoryReader("dataset/txt").load_data() print(documents[0]) #parser = SimpleFileNodeParser() #nodes = parser.get_nodes_from_documents(documents) #print(nodes[0]) #Index index = VectorStoreIndex.from_documents(documents) print(index) #Querying query_engine = index.as_query_engine() print(query_engine) response = query_engine.query("What is AIGC?") print(response) #Agent from llama_index.core.indices.query.query_transform.base import StepDecomposeQueryTransform from llama_index.core.query_engine import MultiStepQueryEngine from llama_index.core.query_engine import SubQuestionQueryEngine DEFAULT_STEP_DECOMPOSE_QUERY_TRANSFORM_TMPL = ( "The original question is as follows: {query_str}\n" "We have an opportunity to answer some, or all of the question from a " "knowledge source. " "Context information for the knowledge source is provided below, as " "well as previous reasoning steps.\n" "Given the context and previous reasoning, return a question that can " "be answered from " "the context. This question can be the same as the original question, " "or this question can represent a subcomponent of the overall question." "It should not be irrelevant to the original question.\n" "If we cannot extract more information from the context, provide 'None' " "as the answer. " "Some examples are given below: " "\n\n" "Question: How many Grand Slam titles does the winner of the 2020 Australian " "Open have?\n" "Knowledge source context: Provides names of the winners of the 2020 " "Australian Open\n" "Previous reasoning: None\n" "Next question: Who was the winner of the 2020 Australian Open? " "\n\n" "Question: Who was the winner of the 2020 Australian Open?\n" "Knowledge source context: Provides names of the winners of the 2020 " "Australian Open\n" "Previous reasoning: None.\n" "New question: Who was the winner of the 2020 Australian Open? " "\n\n" "Question: How many Grand Slam titles does the winner of the 2020 Australian " "Open have?\n" "Knowledge source context: Provides information about the winners of the 2020 " "Australian Open\n" "Previous reasoning:\n" "- Who was the winner of the 2020 Australian Open? \n" "- The winner of the 2020 Australian Open was Novak Djokovic.\n" "New question: None" "\n\n" "Question: How many Grand Slam titles does the winner of the 2020 Australian " "Open have?\n" "Knowledge source context: Provides information about the winners of the 2020 " "Australian Open - includes biographical information for each winner\n" "Previous reasoning:\n" "- Who was the winner of the 2020 Australian Open? \n" "- The winner of the 2020 Australian Open was Novak Djokovic.\n" "New question: How many Grand Slam titles does Novak Djokovic have? " "\n\n" "Question: {query_str}\n" "Knowledge source context: {context_str}\n" "Previous reasoning: {prev_reasoning}\n" "New question: " ) print(DEFAULT_STEP_DECOMPOSE_QUERY_TRANSFORM_TMPL)
[ "llama_index.core.VectorStoreIndex.from_documents", "llama_index.core.SimpleDirectoryReader" ]
[((445, 487), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (476, 487), False, 'from llama_index.core import VectorStoreIndex\n'), ((257, 293), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""dataset/txt"""'], {}), "('dataset/txt')\n", (278, 293), False, 'from llama_index.core import SimpleDirectoryReader\n')]
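The snippet above imports StepDecomposeQueryTransform and MultiStepQueryEngine but never wires them to the index it builds. One plausible way to connect them is sketched below; it reuses the same dataset path and default models, assumes an OpenAI key is configured, and the index_summary string is only an illustrative description of the corpus.

from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.indices.query.query_transform.base import (
    StepDecomposeQueryTransform,
)
from llama_index.core.query_engine import MultiStepQueryEngine

# Same index construction as above.
documents = SimpleDirectoryReader("dataset/txt").load_data()
index = VectorStoreIndex.from_documents(documents)

# Decompose a complex question into sequential sub-questions and answer step by step.
step_decompose = StepDecomposeQueryTransform(verbose=True)
multi_step_engine = MultiStepQueryEngine(
    query_engine=index.as_query_engine(),
    query_transform=step_decompose,
    index_summary="Articles about AIGC and related topics",
)

response = multi_step_engine.query("What is AIGC and what are its main application areas?")
print(response)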
get_ipython().run_line_magic('pip', 'install llama-index-embeddings-openai') get_ipython().run_line_magic('pip', 'install llama-index-postprocessor-cohere-rerank') get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') import phoenix as px px.launch_app() import llama_index.core llama_index.core.set_global_handler("arize_phoenix") from llama_index.llms.openai import OpenAI from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.core import Settings Settings.llm = OpenAI(model="gpt-3.5-turbo") Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small") from llama_index.core import SimpleDirectoryReader reader = SimpleDirectoryReader("../data/paul_graham") docs = reader.load_data() import os from llama_index.core import ( StorageContext, VectorStoreIndex, load_index_from_storage, ) if not os.path.exists("storage"): index = VectorStoreIndex.from_documents(docs) index.set_index_id("vector_index") index.storage_context.persist("./storage") else: storage_context = StorageContext.from_defaults(persist_dir="storage") index = load_index_from_storage(storage_context, index_id="vector_index") from llama_index.core.query_pipeline import QueryPipeline from llama_index.core import PromptTemplate prompt_str = "Please generate related movies to {movie_name}" prompt_tmpl = PromptTemplate(prompt_str) llm = OpenAI(model="gpt-3.5-turbo") p = QueryPipeline(chain=[prompt_tmpl, llm], verbose=True) output = p.run(movie_name="The Departed") print(str(output)) from typing import List from pydantic import BaseModel, Field from llama_index.core.output_parsers import PydanticOutputParser class Movie(BaseModel): """Object representing a single movie.""" name: str = Field(..., description="Name of the movie.") year: int = Field(..., description="Year of the movie.") class Movies(BaseModel): """Object representing a list of movies.""" movies: List[Movie] = Field(..., description="List of movies.") llm = OpenAI(model="gpt-3.5-turbo") output_parser = PydanticOutputParser(Movies) json_prompt_str = """\ Please generate related movies to {movie_name}. Output with the following JSON format: """ json_prompt_str = output_parser.format(json_prompt_str) json_prompt_tmpl = PromptTemplate(json_prompt_str) p = QueryPipeline(chain=[json_prompt_tmpl, llm, output_parser], verbose=True) output = p.run(movie_name="Toy Story") output prompt_str = "Please generate related movies to {movie_name}" prompt_tmpl = PromptTemplate(prompt_str) prompt_str2 = """\ Here's some text: {text} Can you rewrite this with a summary of each movie? 
""" prompt_tmpl2 = PromptTemplate(prompt_str2) llm = OpenAI(model="gpt-3.5-turbo") llm_c = llm.as_query_component(streaming=True) p = QueryPipeline( chain=[prompt_tmpl, llm_c, prompt_tmpl2, llm_c], verbose=True ) output = p.run(movie_name="The Dark Knight") for o in output: print(o.delta, end="") p = QueryPipeline( chain=[ json_prompt_tmpl, llm.as_query_component(streaming=True), output_parser, ], verbose=True, ) output = p.run(movie_name="Toy Story") print(output) from llama_index.postprocessor.cohere_rerank import CohereRerank prompt_str1 = "Please generate a concise question about Paul Graham's life regarding the following topic {topic}" prompt_tmpl1 = PromptTemplate(prompt_str1) prompt_str2 = ( "Please write a passage to answer the question\n" "Try to include as many key details as possible.\n" "\n" "\n" "{query_str}\n" "\n" "\n" 'Passage:"""\n' ) prompt_tmpl2 = PromptTemplate(prompt_str2) llm = OpenAI(model="gpt-3.5-turbo") retriever = index.as_retriever(similarity_top_k=5) p = QueryPipeline( chain=[prompt_tmpl1, llm, prompt_tmpl2, llm, retriever], verbose=True ) nodes = p.run(topic="college") len(nodes) from llama_index.postprocessor.cohere_rerank import CohereRerank from llama_index.core.response_synthesizers import TreeSummarize prompt_str = "Please generate a question about Paul Graham's life regarding the following topic {topic}" prompt_tmpl = PromptTemplate(prompt_str) llm = OpenAI(model="gpt-3.5-turbo") retriever = index.as_retriever(similarity_top_k=3) reranker = CohereRerank() summarizer = TreeSummarize(llm=llm) p = QueryPipeline(verbose=True) p.add_modules( { "llm": llm, "prompt_tmpl": prompt_tmpl, "retriever": retriever, "summarizer": summarizer, "reranker": reranker, } ) p.add_link("prompt_tmpl", "llm") p.add_link("llm", "retriever") p.add_link("retriever", "reranker", dest_key="nodes") p.add_link("llm", "reranker", dest_key="query_str") p.add_link("reranker", "summarizer", dest_key="nodes") p.add_link("llm", "summarizer", dest_key="query_str") print(summarizer.as_query_component().input_keys) from pyvis.network import Network net = Network(notebook=True, cdn_resources="in_line", directed=True) net.from_nx(p.dag) net.show("rag_dag.html") response = p.run(topic="YC") print(str(response)) response = await p.arun(topic="YC") print(str(response)) from llama_index.postprocessor.cohere_rerank import CohereRerank from llama_index.core.response_synthesizers import TreeSummarize from llama_index.core.query_pipeline import InputComponent retriever = index.as_retriever(similarity_top_k=5) summarizer = TreeSummarize(llm=OpenAI(model="gpt-3.5-turbo")) reranker = CohereRerank() p = QueryPipeline(verbose=True) p.add_modules( { "input": InputComponent(), "retriever": retriever, "summarizer": summarizer, } ) p.add_link("input", "retriever") p.add_link("input", "summarizer", dest_key="query_str") p.add_link("retriever", "summarizer", dest_key="nodes") output = p.run(input="what did the author do in YC") print(str(output)) from llama_index.core.query_pipeline import ( CustomQueryComponent, InputKeys, OutputKeys, ) from typing import Dict, Any from llama_index.core.llms.llm import LLM from pydantic import Field class RelatedMovieComponent(CustomQueryComponent): """Related movie component.""" llm: LLM = Field(..., description="OpenAI LLM") def _validate_component_inputs( self, input: Dict[str, Any] ) -> Dict[str, Any]: """Validate component inputs during run_component.""" return input @property def _input_keys(self) -> set: """Input keys dict.""" return {"movie"} @property def 
_output_keys(self) -> set: return {"output"} def _run_component(self, **kwargs) -> Dict[str, Any]: """Run the component.""" prompt_str = "Please generate related movies to {movie_name}" prompt_tmpl = PromptTemplate(prompt_str) p = QueryPipeline(chain=[prompt_tmpl, llm]) return {"output": p.run(movie_name=kwargs["movie"])} llm = OpenAI(model="gpt-3.5-turbo") component = RelatedMovieComponent(llm=llm) prompt_str = """\ Here's some text: {text} Can you rewrite this in the voice of Shakespeare? """ prompt_tmpl = PromptTemplate(prompt_str) p = QueryPipeline(chain=[component, prompt_tmpl, llm], verbose=True) output = p.run(movie="Love Actually") print(str(output))
[ "llama_index.postprocessor.cohere_rerank.CohereRerank", "llama_index.core.StorageContext.from_defaults", "llama_index.core.output_parsers.PydanticOutputParser", "llama_index.core.PromptTemplate", "llama_index.core.response_synthesizers.TreeSummarize", "llama_index.core.query_pipeline.QueryPipeline", "llama_index.core.query_pipeline.InputComponent", "llama_index.core.VectorStoreIndex.from_documents", "llama_index.core.SimpleDirectoryReader", "llama_index.llms.openai.OpenAI", "llama_index.core.load_index_from_storage", "llama_index.embeddings.openai.OpenAIEmbedding" ]
[((259, 274), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (272, 274), True, 'import phoenix as px\n'), ((510, 539), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (516, 539), False, 'from llama_index.llms.openai import OpenAI\n'), ((563, 610), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'model': '"""text-embedding-3-small"""'}), "(model='text-embedding-3-small')\n", (578, 610), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((674, 718), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""../data/paul_graham"""'], {}), "('../data/paul_graham')\n", (695, 718), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((1374, 1400), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str'], {}), '(prompt_str)\n', (1388, 1400), False, 'from llama_index.core import PromptTemplate\n'), ((1407, 1436), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (1413, 1436), False, 'from llama_index.llms.openai import OpenAI\n'), ((1442, 1495), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[prompt_tmpl, llm]', 'verbose': '(True)'}), '(chain=[prompt_tmpl, llm], verbose=True)\n', (1455, 1495), False, 'from llama_index.core.query_pipeline import QueryPipeline\n'), ((2038, 2067), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (2044, 2067), False, 'from llama_index.llms.openai import OpenAI\n'), ((2084, 2112), 'llama_index.core.output_parsers.PydanticOutputParser', 'PydanticOutputParser', (['Movies'], {}), '(Movies)\n', (2104, 2112), False, 'from llama_index.core.output_parsers import PydanticOutputParser\n'), ((2305, 2336), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['json_prompt_str'], {}), '(json_prompt_str)\n', (2319, 2336), False, 'from llama_index.core import PromptTemplate\n'), ((2342, 2415), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[json_prompt_tmpl, llm, output_parser]', 'verbose': '(True)'}), '(chain=[json_prompt_tmpl, llm, output_parser], verbose=True)\n', (2355, 2415), False, 'from llama_index.core.query_pipeline import QueryPipeline\n'), ((2544, 2570), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str'], {}), '(prompt_str)\n', (2558, 2570), False, 'from llama_index.core import PromptTemplate\n'), ((2687, 2714), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str2'], {}), '(prompt_str2)\n', (2701, 2714), False, 'from llama_index.core import PromptTemplate\n'), ((2721, 2750), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (2727, 2750), False, 'from llama_index.llms.openai import OpenAI\n'), ((2803, 2879), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[prompt_tmpl, llm_c, prompt_tmpl2, llm_c]', 'verbose': '(True)'}), '(chain=[prompt_tmpl, llm_c, prompt_tmpl2, llm_c], verbose=True)\n', (2816, 2879), False, 'from llama_index.core.query_pipeline import QueryPipeline\n'), ((3388, 3415), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str1'], {}), '(prompt_str1)\n', (3402, 3415), False, 'from llama_index.core import PromptTemplate\n'), ((3635, 3662), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str2'], {}), '(prompt_str2)\n', (3649, 3662), False, 'from 
llama_index.core import PromptTemplate\n'), ((3670, 3699), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (3676, 3699), False, 'from llama_index.llms.openai import OpenAI\n'), ((3755, 3843), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[prompt_tmpl1, llm, prompt_tmpl2, llm, retriever]', 'verbose': '(True)'}), '(chain=[prompt_tmpl1, llm, prompt_tmpl2, llm, retriever],\n verbose=True)\n', (3768, 3843), False, 'from llama_index.core.query_pipeline import QueryPipeline\n'), ((4145, 4171), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str'], {}), '(prompt_str)\n', (4159, 4171), False, 'from llama_index.core import PromptTemplate\n'), ((4178, 4207), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (4184, 4207), False, 'from llama_index.llms.openai import OpenAI\n'), ((4270, 4284), 'llama_index.postprocessor.cohere_rerank.CohereRerank', 'CohereRerank', ([], {}), '()\n', (4282, 4284), False, 'from llama_index.postprocessor.cohere_rerank import CohereRerank\n'), ((4298, 4320), 'llama_index.core.response_synthesizers.TreeSummarize', 'TreeSummarize', ([], {'llm': 'llm'}), '(llm=llm)\n', (4311, 4320), False, 'from llama_index.core.response_synthesizers import TreeSummarize\n'), ((4327, 4354), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'verbose': '(True)'}), '(verbose=True)\n', (4340, 4354), False, 'from llama_index.core.query_pipeline import QueryPipeline\n'), ((4913, 4975), 'pyvis.network.Network', 'Network', ([], {'notebook': '(True)', 'cdn_resources': '"""in_line"""', 'directed': '(True)'}), "(notebook=True, cdn_resources='in_line', directed=True)\n", (4920, 4975), False, 'from pyvis.network import Network\n'), ((5451, 5465), 'llama_index.postprocessor.cohere_rerank.CohereRerank', 'CohereRerank', ([], {}), '()\n', (5463, 5465), False, 'from llama_index.postprocessor.cohere_rerank import CohereRerank\n'), ((5472, 5499), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'verbose': '(True)'}), '(verbose=True)\n', (5485, 5499), False, 'from llama_index.core.query_pipeline import QueryPipeline\n'), ((6893, 6922), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (6899, 6922), False, 'from llama_index.llms.openai import OpenAI\n'), ((7080, 7106), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str'], {}), '(prompt_str)\n', (7094, 7106), False, 'from llama_index.core import PromptTemplate\n'), ((7112, 7176), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[component, prompt_tmpl, llm]', 'verbose': '(True)'}), '(chain=[component, prompt_tmpl, llm], verbose=True)\n', (7125, 7176), False, 'from llama_index.core.query_pipeline import QueryPipeline\n'), ((871, 896), 'os.path.exists', 'os.path.exists', (['"""storage"""'], {}), "('storage')\n", (885, 896), False, 'import os\n'), ((910, 947), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {}), '(docs)\n', (941, 947), False, 'from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((1062, 1113), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""storage"""'}), "(persist_dir='storage')\n", (1090, 1113), False, 'from llama_index.core import StorageContext, VectorStoreIndex, 
load_index_from_storage\n'), ((1126, 1191), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'index_id': '"""vector_index"""'}), "(storage_context, index_id='vector_index')\n", (1149, 1191), False, 'from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage\n'), ((1780, 1824), 'pydantic.Field', 'Field', (['...'], {'description': '"""Name of the movie."""'}), "(..., description='Name of the movie.')\n", (1785, 1824), False, 'from pydantic import Field\n'), ((1841, 1885), 'pydantic.Field', 'Field', (['...'], {'description': '"""Year of the movie."""'}), "(..., description='Year of the movie.')\n", (1846, 1885), False, 'from pydantic import Field\n'), ((1988, 2029), 'pydantic.Field', 'Field', (['...'], {'description': '"""List of movies."""'}), "(..., description='List of movies.')\n", (1993, 2029), False, 'from pydantic import Field\n'), ((6161, 6197), 'pydantic.Field', 'Field', (['...'], {'description': '"""OpenAI LLM"""'}), "(..., description='OpenAI LLM')\n", (6166, 6197), False, 'from pydantic import Field\n'), ((5409, 5438), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (5415, 5438), False, 'from llama_index.llms.openai import OpenAI\n'), ((5538, 5554), 'llama_index.core.query_pipeline.InputComponent', 'InputComponent', ([], {}), '()\n', (5552, 5554), False, 'from llama_index.core.query_pipeline import InputComponent\n'), ((6744, 6770), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['prompt_str'], {}), '(prompt_str)\n', (6758, 6770), False, 'from llama_index.core import PromptTemplate\n'), ((6783, 6822), 'llama_index.core.query_pipeline.QueryPipeline', 'QueryPipeline', ([], {'chain': '[prompt_tmpl, llm]'}), '(chain=[prompt_tmpl, llm])\n', (6796, 6822), False, 'from llama_index.core.query_pipeline import QueryPipeline\n')]
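The cells above call `await p.arun(...)` at the top level, which only works inside a notebook's running event loop. Outside a notebook the async path needs an explicit loop; a minimal sketch using the same chain-style pipeline as above (same prompt and model, OpenAI key assumed):

import asyncio

from llama_index.core import PromptTemplate
from llama_index.core.query_pipeline import QueryPipeline
from llama_index.llms.openai import OpenAI


async def main() -> None:
    # Rebuild the simple prompt -> LLM chain and drive it through the async API.
    p = QueryPipeline(
        chain=[
            PromptTemplate("Please generate related movies to {movie_name}"),
            OpenAI(model="gpt-3.5-turbo"),
        ],
        verbose=True,
    )
    output = await p.arun(movie_name="The Departed")
    print(str(output))


if __name__ == "__main__":
    asyncio.run(main())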
import logging from dataclasses import dataclass from typing import Optional import llama_index from llama_index.bridge.pydantic import BaseModel from llama_index.callbacks.base import CallbackManager from llama_index.embeddings.base import BaseEmbedding from llama_index.embeddings.utils import EmbedType, resolve_embed_model from llama_index.indices.prompt_helper import PromptHelper from llama_index.llm_predictor import LLMPredictor from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata from llama_index.llms.base import LLM from llama_index.llms.utils import LLMType, resolve_llm from llama_index.logger import LlamaLogger from llama_index.node_parser.interface import NodeParser from llama_index.node_parser.sentence_window import SentenceWindowNodeParser from llama_index.node_parser.simple import SimpleNodeParser from llama_index.prompts.base import BasePromptTemplate from llama_index.text_splitter.types import TextSplitter logger = logging.getLogger(__name__) def _get_default_node_parser( chunk_size: Optional[int] = None, chunk_overlap: Optional[int] = None, callback_manager: Optional[CallbackManager] = None, ) -> NodeParser: """Get default node parser.""" return SimpleNodeParser.from_defaults( chunk_size=chunk_size, chunk_overlap=chunk_overlap, callback_manager=callback_manager, ) def _get_default_prompt_helper( llm_metadata: LLMMetadata, context_window: Optional[int] = None, num_output: Optional[int] = None, ) -> PromptHelper: """Get default prompt helper.""" if context_window is not None: llm_metadata.context_window = context_window if num_output is not None: llm_metadata.num_output = num_output return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata) class ServiceContextData(BaseModel): llm: dict llm_predictor: dict prompt_helper: dict embed_model: dict node_parser: dict text_splitter: Optional[dict] metadata_extractor: Optional[dict] extractors: Optional[list] @dataclass class ServiceContext: """Service Context container. The service context container is a utility container for LlamaIndex index and query classes. It contains the following: - llm_predictor: BaseLLMPredictor - prompt_helper: PromptHelper - embed_model: BaseEmbedding - node_parser: NodeParser - llama_logger: LlamaLogger (deprecated) - callback_manager: CallbackManager """ llm_predictor: BaseLLMPredictor prompt_helper: PromptHelper embed_model: BaseEmbedding node_parser: NodeParser llama_logger: LlamaLogger callback_manager: CallbackManager @classmethod def from_defaults( cls, llm_predictor: Optional[BaseLLMPredictor] = None, llm: Optional[LLMType] = "default", prompt_helper: Optional[PromptHelper] = None, embed_model: Optional[EmbedType] = "default", node_parser: Optional[NodeParser] = None, llama_logger: Optional[LlamaLogger] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, query_wrapper_prompt: Optional[BasePromptTemplate] = None, # node parser kwargs chunk_size: Optional[int] = None, chunk_overlap: Optional[int] = None, # prompt helper kwargs context_window: Optional[int] = None, num_output: Optional[int] = None, # deprecated kwargs chunk_size_limit: Optional[int] = None, ) -> "ServiceContext": """Create a ServiceContext from defaults. If an argument is specified, then use the argument value provided for that parameter. If an argument is not specified, then use the default value. You can change the base defaults by setting llama_index.global_service_context to a ServiceContext object with your desired settings. 
Args: llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor prompt_helper (Optional[PromptHelper]): PromptHelper embed_model (Optional[BaseEmbedding]): BaseEmbedding or "local" (use local model) node_parser (Optional[NodeParser]): NodeParser llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated) chunk_size (Optional[int]): chunk_size callback_manager (Optional[CallbackManager]): CallbackManager system_prompt (Optional[str]): System-wide prompt to be prepended to all input prompts, used to guide system "decision making" query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap passed-in input queries. Deprecated Args: chunk_size_limit (Optional[int]): renamed to chunk_size """ if chunk_size_limit is not None and chunk_size is None: logger.warning( "chunk_size_limit is deprecated, please specify chunk_size instead" ) chunk_size = chunk_size_limit if llama_index.global_service_context is not None: return cls.from_service_context( llama_index.global_service_context, llm_predictor=llm_predictor, prompt_helper=prompt_helper, embed_model=embed_model, node_parser=node_parser, llama_logger=llama_logger, callback_manager=callback_manager, chunk_size=chunk_size, chunk_size_limit=chunk_size_limit, ) callback_manager = callback_manager or CallbackManager([]) if llm != "default": if llm_predictor is not None: raise ValueError("Cannot specify both llm and llm_predictor") llm = resolve_llm(llm) llm_predictor = llm_predictor or LLMPredictor(llm=llm) if isinstance(llm_predictor, LLMPredictor): llm_predictor.llm.callback_manager = callback_manager if system_prompt: llm_predictor.system_prompt = system_prompt if query_wrapper_prompt: llm_predictor.query_wrapper_prompt = query_wrapper_prompt # NOTE: the embed_model isn't used in all indices embed_model = resolve_embed_model(embed_model) embed_model.callback_manager = callback_manager prompt_helper = prompt_helper or _get_default_prompt_helper( llm_metadata=llm_predictor.metadata, context_window=context_window, num_output=num_output, ) node_parser = node_parser or _get_default_node_parser( chunk_size=chunk_size, chunk_overlap=chunk_overlap, callback_manager=callback_manager, ) llama_logger = llama_logger or LlamaLogger() return cls( llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper, node_parser=node_parser, llama_logger=llama_logger, # deprecated callback_manager=callback_manager, ) @classmethod def from_service_context( cls, service_context: "ServiceContext", llm_predictor: Optional[BaseLLMPredictor] = None, llm: Optional[LLMType] = "default", prompt_helper: Optional[PromptHelper] = None, embed_model: Optional[EmbedType] = "default", node_parser: Optional[NodeParser] = None, llama_logger: Optional[LlamaLogger] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, query_wrapper_prompt: Optional[BasePromptTemplate] = None, # node parser kwargs chunk_size: Optional[int] = None, chunk_overlap: Optional[int] = None, # prompt helper kwargs context_window: Optional[int] = None, num_output: Optional[int] = None, # deprecated kwargs chunk_size_limit: Optional[int] = None, ) -> "ServiceContext": """Instantiate a new service context using a previous as the defaults.""" if chunk_size_limit is not None and chunk_size is None: logger.warning( "chunk_size_limit is deprecated, please specify chunk_size", DeprecationWarning, ) chunk_size = chunk_size_limit callback_manager = callback_manager or service_context.callback_manager if llm != "default": if llm_predictor is not None: 
raise ValueError("Cannot specify both llm and llm_predictor") llm = resolve_llm(llm) llm_predictor = LLMPredictor(llm=llm) llm_predictor = llm_predictor or service_context.llm_predictor if isinstance(llm_predictor, LLMPredictor): llm_predictor.llm.callback_manager = callback_manager if system_prompt: llm_predictor.system_prompt = system_prompt if query_wrapper_prompt: llm_predictor.query_wrapper_prompt = query_wrapper_prompt # NOTE: the embed_model isn't used in all indices # default to using the embed model passed from the service context if embed_model == "default": embed_model = service_context.embed_model embed_model = resolve_embed_model(embed_model) embed_model.callback_manager = callback_manager prompt_helper = prompt_helper or service_context.prompt_helper if context_window is not None or num_output is not None: prompt_helper = _get_default_prompt_helper( llm_metadata=llm_predictor.metadata, context_window=context_window, num_output=num_output, ) node_parser = node_parser or service_context.node_parser if chunk_size is not None or chunk_overlap is not None: node_parser = _get_default_node_parser( chunk_size=chunk_size, chunk_overlap=chunk_overlap, callback_manager=callback_manager, ) llama_logger = llama_logger or service_context.llama_logger return cls( llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper, node_parser=node_parser, llama_logger=llama_logger, # deprecated callback_manager=callback_manager, ) @property def llm(self) -> LLM: if not isinstance(self.llm_predictor, LLMPredictor): raise ValueError("llm_predictor must be an instance of LLMPredictor") return self.llm_predictor.llm def to_dict(self) -> dict: """Convert service context to dict.""" llm_dict = self.llm_predictor.llm.to_dict() llm_predictor_dict = self.llm_predictor.to_dict() embed_model_dict = self.embed_model.to_dict() prompt_helper_dict = self.prompt_helper.to_dict() node_parser_dict = self.node_parser.to_dict() metadata_extractor_dict = None extractor_dicts = None text_splitter_dict = None if isinstance(self.node_parser, SimpleNodeParser) and isinstance( self.node_parser.text_splitter, TextSplitter ): text_splitter_dict = self.node_parser.text_splitter.to_dict() if isinstance(self.node_parser, (SimpleNodeParser, SentenceWindowNodeParser)): if self.node_parser.metadata_extractor: metadata_extractor_dict = self.node_parser.metadata_extractor.to_dict() extractor_dicts = [] for extractor in self.node_parser.metadata_extractor.extractors: extractor_dicts.append(extractor.to_dict()) return ServiceContextData( llm=llm_dict, llm_predictor=llm_predictor_dict, prompt_helper=prompt_helper_dict, embed_model=embed_model_dict, node_parser=node_parser_dict, text_splitter=text_splitter_dict, metadata_extractor=metadata_extractor_dict, extractors=extractor_dicts, ).dict() @classmethod def from_dict(cls, data: dict) -> "ServiceContext": from llama_index.embeddings.loading import load_embed_model from llama_index.llm_predictor.loading import load_predictor from llama_index.llms.loading import load_llm from llama_index.node_parser.extractors.loading import load_extractor from llama_index.node_parser.loading import load_parser from llama_index.text_splitter.loading import load_text_splitter service_context_data = ServiceContextData.parse_obj(data) llm = load_llm(service_context_data.llm) llm_predictor = load_predictor(service_context_data.llm_predictor, llm=llm) embed_model = load_embed_model(service_context_data.embed_model) prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper) 
extractors = None if service_context_data.extractors: extractors = [] for extractor_dict in service_context_data.extractors: extractors.append(load_extractor(extractor_dict, llm=llm)) metadata_extractor = None if service_context_data.metadata_extractor: metadata_extractor = load_extractor( service_context_data.metadata_extractor, extractors=extractors, ) text_splitter = None if service_context_data.text_splitter: text_splitter = load_text_splitter(service_context_data.text_splitter) node_parser = load_parser( service_context_data.node_parser, text_splitter=text_splitter, metadata_extractor=metadata_extractor, ) return cls.from_defaults( llm_predictor=llm_predictor, prompt_helper=prompt_helper, embed_model=embed_model, node_parser=node_parser, ) def set_global_service_context(service_context: Optional[ServiceContext]) -> None: """Helper function to set the global service context.""" llama_index.global_service_context = service_context
[ "llama_index.llm_predictor.loading.load_predictor", "llama_index.llms.loading.load_llm", "llama_index.embeddings.loading.load_embed_model", "llama_index.logger.LlamaLogger", "llama_index.node_parser.extractors.loading.load_extractor", "llama_index.llms.utils.resolve_llm", "llama_index.node_parser.loading.load_parser", "llama_index.callbacks.base.CallbackManager", "llama_index.node_parser.simple.SimpleNodeParser.from_defaults", "llama_index.indices.prompt_helper.PromptHelper.from_dict", "llama_index.text_splitter.loading.load_text_splitter", "llama_index.llm_predictor.LLMPredictor", "llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata", "llama_index.embeddings.utils.resolve_embed_model" ]
[((965, 992), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (982, 992), False, 'import logging\n'), ((1223, 1345), 'llama_index.node_parser.simple.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'callback_manager': 'callback_manager'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap, callback_manager=callback_manager)\n', (1253, 1345), False, 'from llama_index.node_parser.simple import SimpleNodeParser\n'), ((1748, 1805), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1778, 1805), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((6295, 6327), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (6314, 6327), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((9351, 9383), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (9370, 9383), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((12743, 12777), 'llama_index.llms.loading.load_llm', 'load_llm', (['service_context_data.llm'], {}), '(service_context_data.llm)\n', (12751, 12777), False, 'from llama_index.llms.loading import load_llm\n'), ((12802, 12861), 'llama_index.llm_predictor.loading.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {'llm': 'llm'}), '(service_context_data.llm_predictor, llm=llm)\n', (12816, 12861), False, 'from llama_index.llm_predictor.loading import load_predictor\n'), ((12885, 12935), 'llama_index.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (12901, 12935), False, 'from llama_index.embeddings.loading import load_embed_model\n'), ((12961, 13019), 'llama_index.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (12983, 13019), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((13690, 13807), 'llama_index.node_parser.loading.load_parser', 'load_parser', (['service_context_data.node_parser'], {'text_splitter': 'text_splitter', 'metadata_extractor': 'metadata_extractor'}), '(service_context_data.node_parser, text_splitter=text_splitter,\n metadata_extractor=metadata_extractor)\n', (13701, 13807), False, 'from llama_index.node_parser.loading import load_parser\n'), ((5628, 5647), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (5643, 5647), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((5815, 5831), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (5826, 5831), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((5873, 5894), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (5885, 5894), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((6828, 6841), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (6839, 6841), False, 'from llama_index.logger import LlamaLogger\n'), ((8646, 8662), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (8657, 8662), False, 'from llama_index.llms.utils 
import LLMType, resolve_llm\n'), ((8691, 8712), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (8703, 8712), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((13381, 13459), 'llama_index.node_parser.extractors.loading.load_extractor', 'load_extractor', (['service_context_data.metadata_extractor'], {'extractors': 'extractors'}), '(service_context_data.metadata_extractor, extractors=extractors)\n', (13395, 13459), False, 'from llama_index.node_parser.extractors.loading import load_extractor\n'), ((13612, 13666), 'llama_index.text_splitter.loading.load_text_splitter', 'load_text_splitter', (['service_context_data.text_splitter'], {}), '(service_context_data.text_splitter)\n', (13630, 13666), False, 'from llama_index.text_splitter.loading import load_text_splitter\n'), ((13220, 13259), 'llama_index.node_parser.extractors.loading.load_extractor', 'load_extractor', (['extractor_dict'], {'llm': 'llm'}), '(extractor_dict, llm=llm)\n', (13234, 13259), False, 'from llama_index.node_parser.extractors.loading import load_extractor\n')]
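The module above centers on ServiceContext.from_defaults and the global override via set_global_service_context. As a rough illustration of how these defaults are typically consumed downstream (a sketch, not code taken from this file), the following assumes the legacy llama_index package layout shown above (pre-0.10), an OPENAI_API_KEY in the environment, and a local ./data folder; the model name and chunk settings are illustrative choices only.

from llama_index import (
    ServiceContext,
    SimpleDirectoryReader,
    VectorStoreIndex,
    set_global_service_context,
)
from llama_index.llms import OpenAI

# Build a context with an explicit LLM and explicit chunking defaults.
service_context = ServiceContext.from_defaults(
    llm=OpenAI(model="gpt-3.5-turbo", temperature=0),
    chunk_size=512,
    chunk_overlap=20,
)

# Registering the context globally makes it the fallback used by from_defaults()
# whenever no explicit service_context is passed.
set_global_service_context(service_context)

documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents, service_context=service_context)
print(index.as_query_engine().query("What is this collection of documents about?"))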
from urllib import response import llama_index from pathlib import Path from typing import Annotated, List from fastapi.responses import StreamingResponse from fastapi import ( File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status ) from llama_index import StorageContext, VectorStoreIndex, SummaryIndex from llama_index.selectors.llm_selectors import LLMSingleSelector from llama_index.llms.types import MessageRole, ChatMessage from llama_index.retrievers import VectorIndexRetriever, SummaryIndexEmbeddingRetriever, RouterRetriever from llama_index.tools import RetrieverTool from llama_index.chat_engine import ContextChatEngine from llama_index.memory import ChatMemoryBuffer from llama_index.vector_stores import ( MetadataFilter, MetadataFilters, FilterOperator ) from llama_index.callbacks import CallbackManager, LlamaDebugHandler from app.utils.json_to import json_to_model from app.utils.index import get_index from app.utils.auth import decode_access_token from app.utils.fs import get_s3_fs, get_s3_boto_client from app.db.pg_vector import get_vector_store_singleton from app.db.crud import get_documents, create_documents, delete_document, is_user_existed from app.pydantic_models.chat import ChatData from app.orm_models import Document from app.core.ingest import ingest_user_documents from app.prompts.system import LLM_SYSTEM_MESSAGE from app.prompts.selector import MULTI_SELECT_PROMPT_TEMPLATE, SINGLE_SELECTOR_PROMPT_TEMPLATE chat_router = r = APIRouter() @r.post("") async def chat( request: Request, # Note: To support clients sending a JSON object using content-type "text/plain", # we need to use Depends(json_to_model(_ChatData)) here data: Annotated[ChatData, Depends(json_to_model(ChatData))], index: Annotated[dict, Depends(get_index)], token_payload: Annotated[dict, Depends(decode_access_token)] ): # logger = logging.getLogger("uvicorn") user_id = token_payload["user_id"] # Only need to retrieve indices from the current user. filters = MetadataFilters( filters=[ MetadataFilter( key="user_id", operator=FilterOperator.EQ, value=user_id), ] ) # check preconditions and get last message if len(data.messages) == 0: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="No messages provided", ) lastMessage = data.messages.pop() if lastMessage.role != MessageRole.USER: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="Last message must be from user", ) # convert messages coming from the request to type ChatMessage messages = [ ChatMessage( role=m.role, content=m.content, ) for m in data.messages ] # query chat engine # system_message = ( # "You are a professional job candidate who will answer the recruiter question using the context information." # "If the question is out of scope, kindly apologize and refuse to answer." # ) # Callbacks for observability. # TODO: this is not working. llama_debug = LlamaDebugHandler(print_trace_on_end=True) callback_manager = CallbackManager([llama_debug]) vs_retriever = VectorIndexRetriever( index=index["vector"], similarity_top_k=3, filters=filters, ) summary_retriever = SummaryIndexEmbeddingRetriever( index=index["summary"], similarity_top_k=3, ) vs_tool = RetrieverTool.from_defaults( retriever=vs_retriever, description="Useful for retrieving specific context from uploaded documents." ) summary_tool = RetrieverTool.from_defaults( retriever=summary_retriever, description="Useful to retrieve all context from uploaded documents and summary tasks. Don't use if the question only requires more specific context." 
) # TODO: correct the prompt used by LLM to use router retriever. retriever = RouterRetriever( selector=LLMSingleSelector.from_defaults( # prompt_template_str=SINGLE_SELECTOR_PROMPT_TEMPLATE ), retriever_tools=[vs_tool, summary_tool] ) chat_engine = ContextChatEngine( retriever=vs_retriever, llm=llama_index.global_service_context.llm, memory=ChatMemoryBuffer.from_defaults(token_limit=4096), prefix_messages=[ChatMessage( role="system", content=LLM_SYSTEM_MESSAGE)], callback_manager=callback_manager, ) print(chat_engine._retriever.get_prompts()) response = chat_engine.stream_chat(lastMessage.content, messages) # stream response async def event_generator(): for token in response.response_gen: # If client closes connection, stop sending events if await request.is_disconnected(): break yield token return StreamingResponse(event_generator(), media_type="text/plain") @r.post("/upload/single") async def upload( description: Annotated[str, Form()], question: Annotated[str, Form()], file: Annotated[UploadFile, File()], token_payload: Annotated[dict, Depends(decode_access_token)] ) -> Document: vector_store = await get_vector_store_singleton() user_id = token_payload["user_id"] user_s3_folder = Path(f"talking-resume/{user_id}") nodes = [] # Have to use boto because I don't know how to write temporary file to s3 using f3fs. s3 = get_s3_boto_client() doc = Document( s3_path=f"{user_id}/{file.filename}", is_active=True, description=description, question=question, user_id=user_id, ) # Create new record in db. doc_in_db = create_documents([doc])[0] doc_uuid = str(doc_in_db.id) # Save the document to S3. s3.upload_fileobj( file.file, "talking-resume", doc.s3_path, ) nodes = ingest_user_documents( doc_uuid, f"talking-resume/{doc.s3_path}", doc.description, doc.question, doc.user_id ) # Save documents indices and embeddings. s3 = get_s3_fs() persist_dir = None if await is_user_existed(user_id): persist_dir = f"talking-resume/{user_id}" storage_context = StorageContext.from_defaults( vector_store=vector_store, persist_dir=persist_dir, fs=s3) # Vector store index. vector_index = VectorStoreIndex.from_documents( documents=nodes, storage_context=storage_context, show_progress=True) vector_index.set_index_id(f'vector_{user_id}') vector_index.storage_context.persist(persist_dir=user_s3_folder, fs=s3) # Summary index. 
summary_index = SummaryIndex.from_documents( documents=nodes, storage_context=storage_context, show_progress=True) summary_index.set_index_id(f'summary_{user_id}') summary_index.storage_context.persist(persist_dir=user_s3_folder, fs=s3) return doc_in_db @r.get("/upload") def get_upload( user_id: str, token_payload: Annotated[dict, Depends(decode_access_token)] ) -> List[Document]: documents = get_documents(user_id) for document in documents: s3 = get_s3_boto_client() s3_url = s3.generate_presigned_url( "get_object", Params={ "Bucket": "talking-resume", "Key": document.s3_path, "ResponseContentDisposition": "inline", "ResponseContentType": "application/pdf"}) document.s3_url = s3_url return documents @r.delete("/upload") async def delete_upload( document_id: str, user_id: str, ) -> None: await delete_document(document_id, user_id) @r.post("/upload/multiple") async def upload( descriptions: Annotated[List[str], Form()], questions: Annotated[List[str], Form()], files: Annotated[List[UploadFile], File()], token_payload: Annotated[dict, Depends(decode_access_token)] ) -> List[Document]: vector_store = await get_vector_store_singleton() user_id = token_payload["user_id"] user_s3_folder = Path(f"talking-resume/{user_id}") # TODO: smartly remove or inactivate documents instead of full deletion. # if await is_user_existed(user_id): # await delete_all_documents_from_user(user_id) # Have to use boto because I don't know how to write temporary file to s3 using f3fs. s3 = get_s3_boto_client() nodes = [] docs = [] for user_document, description, question in zip(files, descriptions, questions): doc = Document( s3_path=f"{user_id}/{user_document.filename}", is_active=True, description=description, question=question, user_id=user_id, ) # Save the document to S3. s3.upload_fileobj( user_document.file, "talking-resume", doc.s3_path, ) nodes.extend(ingest_user_documents( f"talking-resume/{doc.s3_path}", doc.description, doc.question, doc.user_id)) docs.append(doc) # Save documents indices and embeddings. s3 = get_s3_fs() storage_context = StorageContext.from_defaults( vector_store=vector_store, fs=s3) # Vector store index. vector_index = VectorStoreIndex.from_documents( documents=nodes, storage_context=storage_context) vector_index.set_index_id(user_id) vector_index.storage_context.persist(persist_dir=user_s3_folder, fs=s3) # Create new record in db. docs = create_documents(docs) return docs
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.retrievers.SummaryIndexEmbeddingRetriever", "llama_index.llms.types.ChatMessage", "llama_index.vector_stores.MetadataFilter", "llama_index.callbacks.LlamaDebugHandler", "llama_index.selectors.llm_selectors.LLMSingleSelector.from_defaults", "llama_index.StorageContext.from_defaults", "llama_index.retrievers.VectorIndexRetriever", "llama_index.callbacks.CallbackManager", "llama_index.tools.RetrieverTool.from_defaults", "llama_index.memory.ChatMemoryBuffer.from_defaults", "llama_index.SummaryIndex.from_documents" ]
[((1534, 1545), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (1543, 1545), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((3266, 3308), 'llama_index.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {'print_trace_on_end': '(True)'}), '(print_trace_on_end=True)\n', (3283, 3308), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler\n'), ((3332, 3362), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[llama_debug]'], {}), '([llama_debug])\n', (3347, 3362), False, 'from llama_index.callbacks import CallbackManager, LlamaDebugHandler\n'), ((3383, 3468), 'llama_index.retrievers.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': "index['vector']", 'similarity_top_k': '(3)', 'filters': 'filters'}), "(index=index['vector'], similarity_top_k=3, filters=filters\n )\n", (3403, 3468), False, 'from llama_index.retrievers import VectorIndexRetriever, SummaryIndexEmbeddingRetriever, RouterRetriever\n'), ((3519, 3593), 'llama_index.retrievers.SummaryIndexEmbeddingRetriever', 'SummaryIndexEmbeddingRetriever', ([], {'index': "index['summary']", 'similarity_top_k': '(3)'}), "(index=index['summary'], similarity_top_k=3)\n", (3549, 3593), False, 'from llama_index.retrievers import VectorIndexRetriever, SummaryIndexEmbeddingRetriever, RouterRetriever\n'), ((3632, 3767), 'llama_index.tools.RetrieverTool.from_defaults', 'RetrieverTool.from_defaults', ([], {'retriever': 'vs_retriever', 'description': '"""Useful for retrieving specific context from uploaded documents."""'}), "(retriever=vs_retriever, description=\n 'Useful for retrieving specific context from uploaded documents.')\n", (3659, 3767), False, 'from llama_index.tools import RetrieverTool\n'), ((3804, 4022), 'llama_index.tools.RetrieverTool.from_defaults', 'RetrieverTool.from_defaults', ([], {'retriever': 'summary_retriever', 'description': '"""Useful to retrieve all context from uploaded documents and summary tasks. Don\'t use if the question only requires more specific context."""'}), '(retriever=summary_retriever, description=\n "Useful to retrieve all context from uploaded documents and summary tasks. 
Don\'t use if the question only requires more specific context."\n )\n', (3831, 4022), False, 'from llama_index.tools import RetrieverTool\n'), ((5459, 5492), 'pathlib.Path', 'Path', (['f"""talking-resume/{user_id}"""'], {}), "(f'talking-resume/{user_id}')\n", (5463, 5492), False, 'from pathlib import Path\n'), ((5608, 5628), 'app.utils.fs.get_s3_boto_client', 'get_s3_boto_client', ([], {}), '()\n', (5626, 5628), False, 'from app.utils.fs import get_s3_fs, get_s3_boto_client\n'), ((5639, 5767), 'app.orm_models.Document', 'Document', ([], {'s3_path': 'f"""{user_id}/{file.filename}"""', 'is_active': '(True)', 'description': 'description', 'question': 'question', 'user_id': 'user_id'}), "(s3_path=f'{user_id}/{file.filename}', is_active=True, description=\n description, question=question, user_id=user_id)\n", (5647, 5767), False, 'from app.orm_models import Document\n'), ((6056, 6169), 'app.core.ingest.ingest_user_documents', 'ingest_user_documents', (['doc_uuid', 'f"""talking-resume/{doc.s3_path}"""', 'doc.description', 'doc.question', 'doc.user_id'], {}), "(doc_uuid, f'talking-resume/{doc.s3_path}', doc.\n description, doc.question, doc.user_id)\n", (6077, 6169), False, 'from app.core.ingest import ingest_user_documents\n'), ((6266, 6277), 'app.utils.fs.get_s3_fs', 'get_s3_fs', ([], {}), '()\n', (6275, 6277), False, 'from app.utils.fs import get_s3_fs, get_s3_boto_client\n'), ((6413, 6505), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store', 'persist_dir': 'persist_dir', 'fs': 's3'}), '(vector_store=vector_store, persist_dir=\n persist_dir, fs=s3)\n', (6441, 6505), False, 'from llama_index import StorageContext, VectorStoreIndex, SummaryIndex\n'), ((6571, 6677), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'nodes', 'storage_context': 'storage_context', 'show_progress': '(True)'}), '(documents=nodes, storage_context=\n storage_context, show_progress=True)\n', (6602, 6677), False, 'from llama_index import StorageContext, VectorStoreIndex, SummaryIndex\n'), ((6851, 6953), 'llama_index.SummaryIndex.from_documents', 'SummaryIndex.from_documents', ([], {'documents': 'nodes', 'storage_context': 'storage_context', 'show_progress': '(True)'}), '(documents=nodes, storage_context=\n storage_context, show_progress=True)\n', (6878, 6953), False, 'from llama_index import StorageContext, VectorStoreIndex, SummaryIndex\n'), ((7266, 7288), 'app.db.crud.get_documents', 'get_documents', (['user_id'], {}), '(user_id)\n', (7279, 7288), False, 'from app.db.crud import get_documents, create_documents, delete_document, is_user_existed\n'), ((8235, 8268), 'pathlib.Path', 'Path', (['f"""talking-resume/{user_id}"""'], {}), "(f'talking-resume/{user_id}')\n", (8239, 8268), False, 'from pathlib import Path\n'), ((8544, 8564), 'app.utils.fs.get_s3_boto_client', 'get_s3_boto_client', ([], {}), '()\n', (8562, 8564), False, 'from app.utils.fs import get_s3_fs, get_s3_boto_client\n'), ((9271, 9282), 'app.utils.fs.get_s3_fs', 'get_s3_fs', ([], {}), '()\n', (9280, 9282), False, 'from app.utils.fs import get_s3_fs, get_s3_boto_client\n'), ((9305, 9367), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store', 'fs': 's3'}), '(vector_store=vector_store, fs=s3)\n', (9333, 9367), False, 'from llama_index import StorageContext, VectorStoreIndex, SummaryIndex\n'), ((9423, 9509), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', 
([], {'documents': 'nodes', 'storage_context': 'storage_context'}), '(documents=nodes, storage_context=\n storage_context)\n', (9454, 9509), False, 'from llama_index import StorageContext, VectorStoreIndex, SummaryIndex\n'), ((9672, 9694), 'app.db.crud.create_documents', 'create_documents', (['docs'], {}), '(docs)\n', (9688, 9694), False, 'from app.db.crud import get_documents, create_documents, delete_document, is_user_existed\n'), ((2361, 2451), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""No messages provided"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'No messages provided')\n", (2374, 2451), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((2579, 2679), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_400_BAD_REQUEST', 'detail': '"""Last message must be from user"""'}), "(status_code=status.HTTP_400_BAD_REQUEST, detail=\n 'Last message must be from user')\n", (2592, 2679), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((2802, 2845), 'llama_index.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'm.role', 'content': 'm.content'}), '(role=m.role, content=m.content)\n', (2813, 2845), False, 'from llama_index.llms.types import MessageRole, ChatMessage\n'), ((5370, 5398), 'app.db.pg_vector.get_vector_store_singleton', 'get_vector_store_singleton', ([], {}), '()\n', (5396, 5398), False, 'from app.db.pg_vector import get_vector_store_singleton\n'), ((5857, 5880), 'app.db.crud.create_documents', 'create_documents', (['[doc]'], {}), '([doc])\n', (5873, 5880), False, 'from app.db.crud import get_documents, create_documents, delete_document, is_user_existed\n'), ((6314, 6338), 'app.db.crud.is_user_existed', 'is_user_existed', (['user_id'], {}), '(user_id)\n', (6329, 6338), False, 'from app.db.crud import get_documents, create_documents, delete_document, is_user_existed\n'), ((7333, 7353), 'app.utils.fs.get_s3_boto_client', 'get_s3_boto_client', ([], {}), '()\n', (7351, 7353), False, 'from app.utils.fs import get_s3_fs, get_s3_boto_client\n'), ((7808, 7845), 'app.db.crud.delete_document', 'delete_document', (['document_id', 'user_id'], {}), '(document_id, user_id)\n', (7823, 7845), False, 'from app.db.crud import get_documents, create_documents, delete_document, is_user_existed\n'), ((8146, 8174), 'app.db.pg_vector.get_vector_store_singleton', 'get_vector_store_singleton', ([], {}), '()\n', (8172, 8174), False, 'from app.db.pg_vector import get_vector_store_singleton\n'), ((8693, 8829), 'app.orm_models.Document', 'Document', ([], {'s3_path': 'f"""{user_id}/{user_document.filename}"""', 'is_active': '(True)', 'description': 'description', 'question': 'question', 'user_id': 'user_id'}), "(s3_path=f'{user_id}/{user_document.filename}', is_active=True,\n description=description, question=question, user_id=user_id)\n", (8701, 8829), False, 'from app.orm_models import Document\n'), ((4154, 4187), 'llama_index.selectors.llm_selectors.LLMSingleSelector.from_defaults', 'LLMSingleSelector.from_defaults', ([], {}), '()\n', (4185, 4187), False, 'from llama_index.selectors.llm_selectors import LLMSingleSelector\n'), ((4455, 4503), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4096)'}), '(token_limit=4096)\n', (4485, 4503), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((9078, 9181), 
'app.core.ingest.ingest_user_documents', 'ingest_user_documents', (['f"""talking-resume/{doc.s3_path}"""', 'doc.description', 'doc.question', 'doc.user_id'], {}), "(f'talking-resume/{doc.s3_path}', doc.description, doc\n .question, doc.user_id)\n", (9099, 9181), False, 'from app.core.ingest import ingest_user_documents\n'), ((1836, 1854), 'fastapi.Depends', 'Depends', (['get_index'], {}), '(get_index)\n', (1843, 1854), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((1892, 1920), 'fastapi.Depends', 'Depends', (['decode_access_token'], {}), '(decode_access_token)\n', (1899, 1920), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((2128, 2200), 'llama_index.vector_stores.MetadataFilter', 'MetadataFilter', ([], {'key': '"""user_id"""', 'operator': 'FilterOperator.EQ', 'value': 'user_id'}), "(key='user_id', operator=FilterOperator.EQ, value=user_id)\n", (2142, 2200), False, 'from llama_index.vector_stores import MetadataFilter, MetadataFilters, FilterOperator\n'), ((4530, 4584), 'llama_index.llms.types.ChatMessage', 'ChatMessage', ([], {'role': '"""system"""', 'content': 'LLM_SYSTEM_MESSAGE'}), "(role='system', content=LLM_SYSTEM_MESSAGE)\n", (4541, 4584), False, 'from llama_index.llms.types import MessageRole, ChatMessage\n'), ((5177, 5183), 'fastapi.Form', 'Form', ([], {}), '()\n', (5181, 5183), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((5215, 5221), 'fastapi.Form', 'Form', ([], {}), '()\n', (5219, 5221), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((5256, 5262), 'fastapi.File', 'File', ([], {}), '()\n', (5260, 5262), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((5300, 5328), 'fastapi.Depends', 'Depends', (['decode_access_token'], {}), '(decode_access_token)\n', (5307, 5328), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((7199, 7227), 'fastapi.Depends', 'Depends', (['decode_access_token'], {}), '(decode_access_token)\n', (7206, 7227), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((7933, 7939), 'fastapi.Form', 'Form', ([], {}), '()\n', (7937, 7939), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((7978, 7984), 'fastapi.Form', 'Form', ([], {}), '()\n', (7982, 7984), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((8026, 8032), 'fastapi.File', 'File', ([], {}), '()\n', (8030, 8032), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((8070, 8098), 'fastapi.Depends', 'Depends', (['decode_access_token'], {}), '(decode_access_token)\n', (8077, 8098), False, 'from fastapi import File, Form, UploadFile, APIRouter, Depends, HTTPException, Request, status\n'), ((1782, 1805), 'app.utils.json_to.json_to_model', 'json_to_model', (['ChatData'], {}), '(ChatData)\n', (1795, 1805), False, 'from app.utils.json_to import json_to_model\n')]
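The router above wires a VectorIndexRetriever into a ContextChatEngine with a ChatMemoryBuffer and streams tokens back to the client. Stripped of the FastAPI, auth, and S3 plumbing, the core pattern looks roughly like the sketch below; it assumes the legacy llama_index package, an index built from a local ./data folder, and placeholder values for the system prompt and token limit.

from llama_index import SimpleDirectoryReader, VectorStoreIndex
from llama_index.chat_engine import ContextChatEngine
from llama_index.memory import ChatMemoryBuffer
from llama_index.retrievers import VectorIndexRetriever

index = VectorStoreIndex.from_documents(SimpleDirectoryReader("./data").load_data())
retriever = VectorIndexRetriever(index=index, similarity_top_k=3)

chat_engine = ContextChatEngine.from_defaults(
    retriever=retriever,
    memory=ChatMemoryBuffer.from_defaults(token_limit=4096),
    system_prompt="Answer the recruiter's questions using only the retrieved context.",
)

# stream_chat returns a streaming response whose response_gen yields tokens one by one.
streaming_response = chat_engine.stream_chat("What projects are described in the documents?")
for token in streaming_response.response_gen:
    print(token, end="")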
import utils import os import requests import llama_index import torch import llama_cpp from llama_index import SimpleDirectoryReader from llama_index import Document from llama_index import VectorStoreIndex from llama_index import ServiceContext from llama_index import LLMPredictor # Paramas llama = True ### Get data dirpath = 'related_works/Cloud_VM/' filename = dirpath + 'ey.pdf' url = 'https://assets.ey.com/content/dam/ey-sites/ey-com/nl_nl/topics/jaarverslag/downloads-pdfs/2022-2023/ey-nl-financial-statements-2023-en.pdf' if not os.path.exists(filename): print(f"Downloading {filename} from {url}...") response = requests.get(url) with open(dirpath + 'ey.pdf', 'wb') as f: f.write(response.content) documents = SimpleDirectoryReader( input_files=[filename] ).load_data() ### Print data print(type(documents), "\n") print(len(documents), "\n") print(type(documents[0])) print(documents[0]) ### Create doc object document = Document(text="\n\n".join([doc.text for doc in documents])) ### load model model_name_or_path = "TheBloke/Llama-2-13B-chat-GGML" model_basename = "llama-2-13b-chat.ggmlv3.q5_1.bin" # the model is in bin format from huggingface_hub import hf_hub_download model_path = hf_hub_download(repo_id=model_name_or_path, filename=model_basename) if llama: # GPU from llama_cpp import Llama llm = None llm = Llama( model_path=model_path, n_threads=2, # CPU cores n_batch=512, # Should be between 1 and n_ctx, consider the amount of VRAM in your GPU. n_gpu_layers=43, # Change this value based on your model and your GPU VRAM pool. n_ctx=4096, # Context window ) else: from transformers import LlamaTokenizer, LlamaForCausalLM tokenizer = LlamaTokenizer.from_pretrained('ChanceFocus/finma-7b-full') llm = LlamaForCausalLM.from_pretrained('ChanceFocus/finma-7b-full', device_map='auto') ##### The replicate endpoint from llama_index.llms import Replicate from llama_index import ServiceContext, set_global_service_context from llama_index.llms.llama_utils import ( messages_to_prompt, completion_to_prompt, ) LLAMA_13B_V2_CHAT = "a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5" # inject custom system prompt into llama-2 def custom_completion_to_prompt(completion: str) -> str: return completion_to_prompt( completion, system_prompt=( "You are a Q&A assistant. Your goal is to answer questions as " "accurately as possible is the instructions and context provided." ), ) llm = Replicate( model=LLAMA_13B_V2_CHAT, temperature=0.01, # override max tokens since it's interpreted # as context window instead of max tokens context_window=4096, # override completion representation for llama 2 completion_to_prompt=custom_completion_to_prompt, # if using llama 2 for data agents, also override the message representation messages_to_prompt=messages_to_prompt, ) service_context = ServiceContext.from_defaults( llm=llm, embed_model="local:BAAI/bge-small-en-v1.5" ) index = VectorStoreIndex.from_documents([document], service_context=service_context) query_engine = index.as_query_engine() response = query_engine.query( "What actions is Ernst & Young Global Limited taking to address climate change issues?" ) print(str(response)) # ## Evaluation setup using TruLens eval_questions = [] with open('eval_questions.txt', 'r') as file: for line in file: # Remove newline character and convert to integer item = line.strip() print(item) eval_questions.append(item) # You can try your own question: new_question = "What is the right AI job for me?" 
eval_questions.append(new_question) print(eval_questions) from trulens_eval import Tru tru = Tru() tru.reset_database() from utils import get_prebuilt_trulens_recorder tru_recorder = get_prebuilt_trulens_recorder(query_engine, app_id="Direct Query Engine") with tru_recorder as recording: for question in eval_questions: response = query_engine.query(question) records, feedback = tru.get_records_and_feedback(app_ids=[]) records.head() # launches on http://localhost:8501/ tru.run_dashboard() # ## Advanced RAG pipeline # ### 1. Sentence Window retrieval from utils import build_sentence_window_index sentence_index = build_sentence_window_index( document, llm, embed_model="local:BAAI/bge-small-en-v1.5", save_dir="sentence_index" ) from utils import get_sentence_window_query_engine sentence_window_engine = get_sentence_window_query_engine(sentence_index) window_response = sentence_window_engine.query( "how do I get started on a personal project in AI?" ) print(str(window_response)) tru.reset_database() tru_recorder_sentence_window = get_prebuilt_trulens_recorder( sentence_window_engine, app_id = "Sentence Window Query Engine" ) for question in eval_questions: with tru_recorder_sentence_window as recording: response = sentence_window_engine.query(question) print(question) print(str(response)) tru.get_leaderboard(app_ids=[]) # launches on http://localhost:8501/ tru.run_dashboard() # ### 2. Auto-merging retrieval from utils import build_automerging_index automerging_index = build_automerging_index( documents, llm, embed_model="local:BAAI/bge-small-en-v1.5", save_dir="merging_index" ) from utils import get_automerging_query_engine automerging_query_engine = get_automerging_query_engine( automerging_index, ) auto_merging_response = automerging_query_engine.query( "How do I build a portfolio of AI projects?" ) print(str(auto_merging_response)) tru.reset_database() tru_recorder_automerging = get_prebuilt_trulens_recorder(automerging_query_engine, app_id="Automerging Query Engine") for question in eval_questions: with tru_recorder_automerging as recording: response = automerging_query_engine.query(question) print(question) print(response) tru.get_leaderboard(app_ids=[]) # launches on http://localhost:8501/ tru.run_dashboard()
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.ServiceContext.from_defaults", "llama_index.SimpleDirectoryReader", "llama_index.llms.llama_utils.completion_to_prompt", "llama_index.llms.Replicate" ]
[((1239, 1307), 'huggingface_hub.hf_hub_download', 'hf_hub_download', ([], {'repo_id': 'model_name_or_path', 'filename': 'model_basename'}), '(repo_id=model_name_or_path, filename=model_basename)\n', (1254, 1307), False, 'from huggingface_hub import hf_hub_download\n'), ((2628, 2799), 'llama_index.llms.Replicate', 'Replicate', ([], {'model': 'LLAMA_13B_V2_CHAT', 'temperature': '(0.01)', 'context_window': '(4096)', 'completion_to_prompt': 'custom_completion_to_prompt', 'messages_to_prompt': 'messages_to_prompt'}), '(model=LLAMA_13B_V2_CHAT, temperature=0.01, context_window=4096,\n completion_to_prompt=custom_completion_to_prompt, messages_to_prompt=\n messages_to_prompt)\n', (2637, 2799), False, 'from llama_index.llms import Replicate\n'), ((3062, 3148), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local:BAAI/bge-small-en-v1.5"""'}), "(llm=llm, embed_model=\n 'local:BAAI/bge-small-en-v1.5')\n", (3090, 3148), False, 'from llama_index import ServiceContext, set_global_service_context\n'), ((3158, 3234), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['[document]'], {'service_context': 'service_context'}), '([document], service_context=service_context)\n', (3189, 3234), False, 'from llama_index import VectorStoreIndex\n'), ((3909, 3914), 'trulens_eval.Tru', 'Tru', ([], {}), '()\n', (3912, 3914), False, 'from trulens_eval import Tru\n'), ((4002, 4075), 'utils.get_prebuilt_trulens_recorder', 'get_prebuilt_trulens_recorder', (['query_engine'], {'app_id': '"""Direct Query Engine"""'}), "(query_engine, app_id='Direct Query Engine')\n", (4031, 4075), False, 'from utils import get_prebuilt_trulens_recorder\n'), ((4503, 4621), 'utils.build_sentence_window_index', 'build_sentence_window_index', (['document', 'llm'], {'embed_model': '"""local:BAAI/bge-small-en-v1.5"""', 'save_dir': '"""sentence_index"""'}), "(document, llm, embed_model=\n 'local:BAAI/bge-small-en-v1.5', save_dir='sentence_index')\n", (4530, 4621), False, 'from utils import build_sentence_window_index\n'), ((4713, 4761), 'utils.get_sentence_window_query_engine', 'get_sentence_window_query_engine', (['sentence_index'], {}), '(sentence_index)\n', (4745, 4761), False, 'from utils import get_sentence_window_query_engine\n'), ((4951, 5048), 'utils.get_prebuilt_trulens_recorder', 'get_prebuilt_trulens_recorder', (['sentence_window_engine'], {'app_id': '"""Sentence Window Query Engine"""'}), "(sentence_window_engine, app_id=\n 'Sentence Window Query Engine')\n", (4980, 5048), False, 'from utils import get_prebuilt_trulens_recorder\n'), ((5440, 5554), 'utils.build_automerging_index', 'build_automerging_index', (['documents', 'llm'], {'embed_model': '"""local:BAAI/bge-small-en-v1.5"""', 'save_dir': '"""merging_index"""'}), "(documents, llm, embed_model=\n 'local:BAAI/bge-small-en-v1.5', save_dir='merging_index')\n", (5463, 5554), False, 'from utils import build_automerging_index\n'), ((5644, 5691), 'utils.get_automerging_query_engine', 'get_automerging_query_engine', (['automerging_index'], {}), '(automerging_index)\n', (5672, 5691), False, 'from utils import get_automerging_query_engine\n'), ((5891, 5986), 'utils.get_prebuilt_trulens_recorder', 'get_prebuilt_trulens_recorder', (['automerging_query_engine'], {'app_id': '"""Automerging Query Engine"""'}), "(automerging_query_engine, app_id=\n 'Automerging Query Engine')\n", (5920, 5986), False, 'from utils import get_prebuilt_trulens_recorder\n'), ((545, 569), 'os.path.exists', 
'os.path.exists', (['filename'], {}), '(filename)\n', (559, 569), False, 'import os\n'), ((641, 658), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (653, 658), False, 'import requests\n'), ((1386, 1473), 'llama_cpp.Llama', 'Llama', ([], {'model_path': 'model_path', 'n_threads': '(2)', 'n_batch': '(512)', 'n_gpu_layers': '(43)', 'n_ctx': '(4096)'}), '(model_path=model_path, n_threads=2, n_batch=512, n_gpu_layers=43,\n n_ctx=4096)\n', (1391, 1473), False, 'from llama_cpp import Llama\n'), ((1772, 1831), 'transformers.LlamaTokenizer.from_pretrained', 'LlamaTokenizer.from_pretrained', (['"""ChanceFocus/finma-7b-full"""'], {}), "('ChanceFocus/finma-7b-full')\n", (1802, 1831), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM\n'), ((1842, 1927), 'transformers.LlamaForCausalLM.from_pretrained', 'LlamaForCausalLM.from_pretrained', (['"""ChanceFocus/finma-7b-full"""'], {'device_map': '"""auto"""'}), "('ChanceFocus/finma-7b-full', device_map='auto'\n )\n", (1874, 1927), False, 'from transformers import LlamaTokenizer, LlamaForCausalLM\n'), ((2382, 2567), 'llama_index.llms.llama_utils.completion_to_prompt', 'completion_to_prompt', (['completion'], {'system_prompt': '"""You are a Q&A assistant. Your goal is to answer questions as accurately as possible is the instructions and context provided."""'}), "(completion, system_prompt=\n 'You are a Q&A assistant. Your goal is to answer questions as accurately as possible is the instructions and context provided.'\n )\n", (2402, 2567), False, 'from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt\n'), ((752, 797), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[filename]'}), '(input_files=[filename])\n', (773, 797), False, 'from llama_index import SimpleDirectoryReader\n')]
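build_sentence_window_index and get_sentence_window_query_engine come from the local utils module, so their internals are not shown here. As a rough idea of what a sentence-window setup usually involves in the legacy llama_index API (an assumption about what the helpers wrap, not their actual code), the pieces are a SentenceWindowNodeParser at indexing time and a MetadataReplacementPostProcessor at query time, reusing the llm and document objects defined above.

from llama_index import ServiceContext, VectorStoreIndex
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor
from llama_index.node_parser import SentenceWindowNodeParser

# Parse each sentence into its own node, storing a window of neighbouring
# sentences in node metadata.
node_parser = SentenceWindowNodeParser.from_defaults(
    window_size=3,
    window_metadata_key="window",
    original_text_metadata_key="original_text",
)
sentence_context = ServiceContext.from_defaults(
    llm=llm,  # e.g. the Replicate-backed Llama 2 defined above
    embed_model="local:BAAI/bge-small-en-v1.5",
    node_parser=node_parser,
)
sentence_index = VectorStoreIndex.from_documents([document], service_context=sentence_context)

# At query time, replace each retrieved sentence with its surrounding window.
query_engine = sentence_index.as_query_engine(
    similarity_top_k=6,
    node_postprocessors=[MetadataReplacementPostProcessor(target_metadata_key="window")],
)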
"""Chat service module.""" from llama_index.core.memory import ChatMemoryBuffer from llama_index.core.base.llms.types import ChatMessage, MessageRole from app.api.database.models.message import MessageCreateModel from app.api.services.message_service import MessageService from app.api.services.ingest_service import ingest_service import llama_index.core llama_index.core.set_global_handler("simple") class ChatService: """Chat Service class for chat operations.""" def __init__(self): self.message_service = MessageService() @staticmethod def chat(query: str): """Chat with the document.""" chat_engine = ingest_service.index.as_query_engine( similarity_top_k=5, streaming=True, verbose=False ) streaming_response = chat_engine.query(query) return streaming_response.response_gen def conversation(self, query: str, session_id: str): """Get answer from the chat engine.""" history = self.message_service.get_messages_by_session_id(session_id) chat_history = [] if history.messages: for message in history.messages: chat_history.append( ChatMessage( message=message.message, role=( MessageRole.USER if message.sender == "user" else MessageRole.ASSISTANT ), ) ) memory = ChatMemoryBuffer.from_defaults( chat_history=chat_history, token_limit=8000 ) chat_engine = ingest_service.index.as_chat_engine( chat_mode="context", memory=memory, similarity_top_k=5, verbose=False, system_prompt=( """\ You are a chatbot. You MUST NOT provide any information unless it is in the Context or previous messages or general conversation. If the user ask something you don't know, say that you cannot answer. \ you MUST keep the answers short and simple. \ """ ), ) response = chat_engine.stream_chat(message=query) for token in response.response_gen: yield token self.message_service.create_message( message=MessageCreateModel( session_id=session_id, message=query, sender="user", ) ) self.message_service.create_message( message=MessageCreateModel( session_id=session_id, message=str(response), sender="assistant", ) )
[ "llama_index.core.memory.ChatMemoryBuffer.from_defaults", "llama_index.core.base.llms.types.ChatMessage" ]
[((532, 548), 'app.api.services.message_service.MessageService', 'MessageService', ([], {}), '()\n', (546, 548), False, 'from app.api.services.message_service import MessageService\n'), ((655, 746), 'app.api.services.ingest_service.ingest_service.index.as_query_engine', 'ingest_service.index.as_query_engine', ([], {'similarity_top_k': '(5)', 'streaming': '(True)', 'verbose': '(False)'}), '(similarity_top_k=5, streaming=True,\n verbose=False)\n', (691, 746), False, 'from app.api.services.ingest_service import ingest_service\n'), ((1542, 1617), 'llama_index.core.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'chat_history': 'chat_history', 'token_limit': '(8000)'}), '(chat_history=chat_history, token_limit=8000)\n', (1572, 1617), False, 'from llama_index.core.memory import ChatMemoryBuffer\n'), ((1662, 2080), 'app.api.services.ingest_service.ingest_service.index.as_chat_engine', 'ingest_service.index.as_chat_engine', ([], {'chat_mode': '"""context"""', 'memory': 'memory', 'similarity_top_k': '(5)', 'verbose': '(False)', 'system_prompt': '""" You are a chatbot. You MUST NOT provide any information unless it is in the Context or previous messages or general conversation. If the user ask something you don\'t know, say that you cannot answer. you MUST keep the answers short and simple. """'}), '(chat_mode=\'context\', memory=memory,\n similarity_top_k=5, verbose=False, system_prompt=\n " You are a chatbot. You MUST NOT provide any information unless it is in the Context or previous messages or general conversation. If the user ask something you don\'t know, say that you cannot answer. you MUST keep the answers short and simple. "\n )\n', (1697, 2080), False, 'from app.api.services.ingest_service import ingest_service\n'), ((2374, 2445), 'app.api.database.models.message.MessageCreateModel', 'MessageCreateModel', ([], {'session_id': 'session_id', 'message': 'query', 'sender': '"""user"""'}), "(session_id=session_id, message=query, sender='user')\n", (2392, 2445), False, 'from app.api.database.models.message import MessageCreateModel\n'), ((1208, 1327), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'message': 'message.message', 'role': "(MessageRole.USER if message.sender == 'user' else MessageRole.ASSISTANT)"}), "(message=message.message, role=MessageRole.USER if message.\n sender == 'user' else MessageRole.ASSISTANT)\n", (1219, 1327), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n')]
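The service above rebuilds prior turns from the database into a ChatMemoryBuffer before opening a context chat engine. A minimal standalone sketch of that priming step is shown below, assuming the post-0.10 llama_index.core package and an index built from a local ./data folder; note that ChatMessage carries its text in the content field and its speaker in role.

from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.memory import ChatMemoryBuffer

index = VectorStoreIndex.from_documents(SimpleDirectoryReader("./data").load_data())

# Previous turns, e.g. loaded from a message store, become ChatMessage objects.
history = [
    ChatMessage(role=MessageRole.USER, content="What documents were ingested?"),
    ChatMessage(role=MessageRole.ASSISTANT, content="A handful of uploaded PDFs."),
]
memory = ChatMemoryBuffer.from_defaults(chat_history=history, token_limit=8000)

chat_engine = index.as_chat_engine(chat_mode="context", memory=memory, similarity_top_k=5)
for token in chat_engine.stream_chat("Summarize them briefly.").response_gen:
    print(token, end="")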
"""FastAPI app creation, logger configuration and main API routes.""" import sys from typing import Any import llama_index from fastapi import FastAPI from fastapi.openapi.utils import get_openapi from loguru import logger from private_gpt.paths import docs_path from private_gpt.server.chat.chat_router import chat_router from private_gpt.server.chunks.chunks_router import chunks_router from private_gpt.server.completions.completions_router import completions_router from private_gpt.server.embeddings.embeddings_router import embeddings_router from private_gpt.server.health.health_router import health_router from private_gpt.server.ingest.ingest_router import ingest_router from private_gpt.settings.settings import settings # Remove pre-configured logging handler logger.remove(0) # Create a new logging handler same as the pre-configured one but with the extra # attribute `request_id` logger.add( sys.stdout, level="INFO", format=( "<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | " "<level>{level: <8}</level> | " "<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> | " "ID: {extra[request_id]} - <level>{message}</level>" ), ) # Add LlamaIndex simple observability llama_index.set_global_handler("simple") # Start the API with open(docs_path / "description.md") as description_file: description = description_file.read() tags_metadata = [ { "name": "Ingestion", "description": "High-level APIs covering document ingestion -internally " "managing document parsing, splitting," "metadata extraction, embedding generation and storage- and ingested " "documents CRUD." "Each ingested document is identified by an ID that can be used to filter the " "context" "used in *Contextual Completions* and *Context Chunks* APIs.", }, { "name": "Contextual Completions", "description": "High-level APIs covering contextual Chat and Completions. They " "follow OpenAI's format, extending it to " "allow using the context coming from ingested documents to create the " "response. Internally" "manage context retrieval, prompt engineering and the response generation.", }, { "name": "Context Chunks", "description": "Low-level API that given a query return relevant chunks of " "text coming from the ingested" "documents.", }, { "name": "Embeddings", "description": "Low-level API to obtain the vector representation of a given " "text, using an Embeddings model." "Follows OpenAI's embeddings API format.", }, { "name": "Health", "description": "Simple health API to make sure the server is up and running.", }, ] app = FastAPI() def custom_openapi() -> dict[str, Any]: if app.openapi_schema: return app.openapi_schema openapi_schema = get_openapi( title="Momentus GPT", description=description, version="0.1.0", summary="PrivateGPT is a production-ready AI project that allows you to " "ask questions to your documents using the power of Large Language " "Models (LLMs), even in scenarios without Internet connection. 
" "100% private, no data leaves your execution environment at any point.", contact={ "url": "https://github.com/imartinez/privateGPT", }, license_info={ "name": "Apache 2.0", "url": "https://www.apache.org/licenses/LICENSE-2.0.html", }, routes=app.routes, tags=tags_metadata, ) openapi_schema["info"]["x-logo"] = { "url": "https://lh3.googleusercontent.com/drive-viewer" "/AK7aPaD_iNlMoTquOBsw4boh4tIYxyEuhz6EtEs8nzq3yNkNAK00xGj" "E1KUCmPJSk3TYOjcs6tReG6w_cLu1S7L_gPgT9z52iw=s2560" } app.openapi_schema = openapi_schema return app.openapi_schema app.openapi = custom_openapi # type: ignore[method-assign] app.include_router(completions_router) app.include_router(chat_router) app.include_router(chunks_router) app.include_router(ingest_router) app.include_router(embeddings_router) app.include_router(health_router) if settings.ui.enabled: from private_gpt.ui.ui import mount_in_app mount_in_app(app)
[ "llama_index.set_global_handler" ]
[((774, 790), 'loguru.logger.remove', 'logger.remove', (['(0)'], {}), '(0)\n', (787, 790), False, 'from loguru import logger\n'), ((897, 1147), 'loguru.logger.add', 'logger.add', (['sys.stdout'], {'level': '"""INFO"""', 'format': '"""<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> | ID: {extra[request_id]} - <level>{message}</level>"""'}), "(sys.stdout, level='INFO', format=\n '<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> | ID: {extra[request_id]} - <level>{message}</level>'\n )\n", (907, 1147), False, 'from loguru import logger\n'), ((1241, 1281), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (1271, 1281), False, 'import llama_index\n'), ((2819, 2828), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (2826, 2828), False, 'from fastapi import FastAPI\n'), ((2953, 3521), 'fastapi.openapi.utils.get_openapi', 'get_openapi', ([], {'title': '"""Momentus GPT"""', 'description': 'description', 'version': '"""0.1.0"""', 'summary': '"""PrivateGPT is a production-ready AI project that allows you to ask questions to your documents using the power of Large Language Models (LLMs), even in scenarios without Internet connection. 100% private, no data leaves your execution environment at any point."""', 'contact': "{'url': 'https://github.com/imartinez/privateGPT'}", 'license_info': "{'name': 'Apache 2.0', 'url':\n 'https://www.apache.org/licenses/LICENSE-2.0.html'}", 'routes': 'app.routes', 'tags': 'tags_metadata'}), "(title='Momentus GPT', description=description, version='0.1.0',\n summary=\n 'PrivateGPT is a production-ready AI project that allows you to ask questions to your documents using the power of Large Language Models (LLMs), even in scenarios without Internet connection. 100% private, no data leaves your execution environment at any point.'\n , contact={'url': 'https://github.com/imartinez/privateGPT'},\n license_info={'name': 'Apache 2.0', 'url':\n 'https://www.apache.org/licenses/LICENSE-2.0.html'}, routes=app.routes,\n tags=tags_metadata)\n", (2964, 3521), False, 'from fastapi.openapi.utils import get_openapi\n'), ((4319, 4336), 'private_gpt.ui.ui.mount_in_app', 'mount_in_app', (['app'], {}), '(app)\n', (4331, 4336), False, 'from private_gpt.ui.ui import mount_in_app\n')]
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('wget "https://github.com/ppasupat/WikiTableQuestions/releases/download/v1.0.2/WikiTableQuestions-1.0.2-compact.zip" -O data.zip') get_ipython().system('unzip data.zip') import pandas as pd from pathlib import Path data_dir = Path("./WikiTableQuestions/csv/200-csv") csv_files = sorted([f for f in data_dir.glob("*.csv")]) dfs = [] for csv_file in csv_files: print(f"processing file: {csv_file}") try: df = pd.read_csv(csv_file) dfs.append(df) except Exception as e: print(f"Error parsing {csv_file}: {str(e)}") tableinfo_dir = "WikiTableQuestions_TableInfo" get_ipython().system('mkdir {tableinfo_dir}') from llama_index.core.program import LLMTextCompletionProgram from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.llms.openai import OpenAI class TableInfo(BaseModel): """Information regarding a structured table.""" table_name: str = Field( ..., description="table name (must be underscores and NO spaces)" ) table_summary: str = Field( ..., description="short, concise summary/caption of the table" ) prompt_str = """\ Give me a summary of the table with the following JSON format. - The table name must be unique to the table and describe it while being concise. - Do NOT output a generic table name (e.g. table, my_table). Do NOT make the table name one of the following: {exclude_table_name_list} Table: {table_str} Summary: """ program = LLMTextCompletionProgram.from_defaults( output_cls=TableInfo, llm=OpenAI(model="gpt-3.5-turbo"), prompt_template_str=prompt_str, ) import json def _get_tableinfo_with_index(idx: int) -> str: results_gen = Path(tableinfo_dir).glob(f"{idx}_*") results_list = list(results_gen) if len(results_list) == 0: return None elif len(results_list) == 1: path = results_list[0] return TableInfo.parse_file(path) else: raise ValueError( f"More than one file matching index: {list(results_gen)}" ) table_names = set() table_infos = [] for idx, df in enumerate(dfs): table_info = _get_tableinfo_with_index(idx) if table_info: table_infos.append(table_info) else: while True: df_str = df.head(10).to_csv() table_info = program( table_str=df_str, exclude_table_name_list=str(list(table_names)), ) table_name = table_info.table_name print(f"Processed table: {table_name}") if table_name not in table_names: table_names.add(table_name) break else: print(f"Table name {table_name} already exists, trying again.") pass out_file = f"{tableinfo_dir}/{idx}_{table_name}.json" json.dump(table_info.dict(), open(out_file, "w")) table_infos.append(table_info) from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, ) import re def sanitize_column_name(col_name): return re.sub(r"\W+", "_", col_name) def create_table_from_dataframe( df: pd.DataFrame, table_name: str, engine, metadata_obj ): sanitized_columns = {col: sanitize_column_name(col) for col in df.columns} df = df.rename(columns=sanitized_columns) columns = [ Column(col, String if dtype == "object" else Integer) for col, dtype in zip(df.columns, df.dtypes) ] table = Table(table_name, metadata_obj, *columns) metadata_obj.create_all(engine) with engine.connect() as conn: for _, row in df.iterrows(): insert_stmt = table.insert().values(**row.to_dict()) conn.execute(insert_stmt) conn.commit() engine = create_engine("sqlite:///:memory:") metadata_obj = MetaData() for idx, df in enumerate(dfs): tableinfo = _get_tableinfo_with_index(idx) print(f"Creating table: {tableinfo.table_name}") create_table_from_dataframe(df, 
tableinfo.table_name, engine, metadata_obj) import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") from llama_index.core.objects import ( SQLTableNodeMapping, ObjectIndex, SQLTableSchema, ) from llama_index.core import SQLDatabase, VectorStoreIndex sql_database = SQLDatabase(engine) table_node_mapping = SQLTableNodeMapping(sql_database) table_schema_objs = [ SQLTableSchema(table_name=t.table_name, context_str=t.table_summary) for t in table_infos ] # add a SQLTableSchema for each table obj_index = ObjectIndex.from_objects( table_schema_objs, table_node_mapping, VectorStoreIndex, ) obj_retriever = obj_index.as_retriever(similarity_top_k=3) from llama_index.core.retrievers import SQLRetriever from typing import List from llama_index.core.query_pipeline import FnComponent sql_retriever = SQLRetriever(sql_database) def get_table_context_str(table_schema_objs: List[SQLTableSchema]): """Get table context string.""" context_strs = [] for table_schema_obj in table_schema_objs: table_info = sql_database.get_single_table_info( table_schema_obj.table_name ) if table_schema_obj.context_str: table_opt_context = " The table description is: " table_opt_context += table_schema_obj.context_str table_info += table_opt_context context_strs.append(table_info) return "\n\n".join(context_strs) table_parser_component = FnComponent(fn=get_table_context_str) from llama_index.core.prompts.default_prompts import DEFAULT_TEXT_TO_SQL_PROMPT from llama_index.core import PromptTemplate from llama_index.core.query_pipeline import FnComponent from llama_index.core.llms import ChatResponse def parse_response_to_sql(response: ChatResponse) -> str: """Parse response to SQL.""" response = response.message.content sql_query_start = response.find("SQLQuery:") if sql_query_start != -1: response = response[sql_query_start:] if response.startswith("SQLQuery:"): response = response[len("SQLQuery:") :] sql_result_start = response.find("SQLResult:") if sql_result_start != -1: response = response[:sql_result_start] return response.strip().strip("```").strip() sql_parser_component = FnComponent(fn=parse_response_to_sql) text2sql_prompt = DEFAULT_TEXT_TO_SQL_PROMPT.partial_format( dialect=engine.dialect.name ) print(text2sql_prompt.template) response_synthesis_prompt_str = ( "Given an input question, synthesize a response from the query results.\n" "Query: {query_str}\n" "SQL: {sql_query}\n" "SQL Response: {context_str}\n" "Response: " ) response_synthesis_prompt = PromptTemplate( response_synthesis_prompt_str, ) llm = OpenAI(model="gpt-3.5-turbo") from llama_index.core.query_pipeline import ( QueryPipeline as QP, Link, InputComponent, CustomQueryComponent, ) qp = QP( modules={ "input": InputComponent(), "table_retriever": obj_retriever, "table_output_parser": table_parser_component, "text2sql_prompt": text2sql_prompt, "text2sql_llm": llm, "sql_output_parser": sql_parser_component, "sql_retriever": sql_retriever, "response_synthesis_prompt": response_synthesis_prompt, "response_synthesis_llm": llm, }, verbose=True, ) qp.add_chain(["input", "table_retriever", "table_output_parser"]) qp.add_link("input", "text2sql_prompt", dest_key="query_str") qp.add_link("table_output_parser", "text2sql_prompt", dest_key="schema") qp.add_chain( ["text2sql_prompt", "text2sql_llm", "sql_output_parser", "sql_retriever"] ) qp.add_link( "sql_output_parser", "response_synthesis_prompt", dest_key="sql_query" ) qp.add_link( "sql_retriever", "response_synthesis_prompt", dest_key="context_str" ) 
qp.add_link("input", "response_synthesis_prompt", dest_key="query_str") qp.add_link("response_synthesis_prompt", "response_synthesis_llm") from pyvis.network import Network net = Network(notebook=True, cdn_resources="in_line", directed=True) net.from_nx(qp.dag) net.show("text2sql_dag.html") response = qp.run( query="What was the year that The Notorious B.I.G was signed to Bad Boy?" ) print(str(response)) response = qp.run(query="Who won best director in the 1972 academy awards") print(str(response)) response = qp.run(query="What was the term of Pasquale Preziosa?") print(str(response)) from llama_index.core import VectorStoreIndex, load_index_from_storage from sqlalchemy import text from llama_index.core.schema import TextNode from llama_index.core import StorageContext import os from pathlib import Path from typing import Dict def index_all_tables( sql_database: SQLDatabase, table_index_dir: str = "table_index_dir" ) -> Dict[str, VectorStoreIndex]: """Index all tables.""" if not Path(table_index_dir).exists(): os.makedirs(table_index_dir) vector_index_dict = {} engine = sql_database.engine for table_name in sql_database.get_usable_table_names(): print(f"Indexing rows in table: {table_name}") if not os.path.exists(f"{table_index_dir}/{table_name}"): with engine.connect() as conn: cursor = conn.execute(text(f'SELECT * FROM "{table_name}"')) result = cursor.fetchall() row_tups = [] for row in result: row_tups.append(tuple(row)) nodes = [TextNode(text=str(t)) for t in row_tups] index = VectorStoreIndex(nodes) index.set_index_id("vector_index") index.storage_context.persist(f"{table_index_dir}/{table_name}") else: storage_context = StorageContext.from_defaults( persist_dir=f"{table_index_dir}/{table_name}" ) index = load_index_from_storage( storage_context, index_id="vector_index" ) vector_index_dict[table_name] = index return vector_index_dict vector_index_dict = index_all_tables(sql_database) test_retriever = vector_index_dict["Bad_Boy_Artists"].as_retriever( similarity_top_k=1 ) nodes = test_retriever.retrieve("P. 
Diddy") print(nodes[0].get_content()) from llama_index.core.retrievers import SQLRetriever from typing import List from llama_index.core.query_pipeline import FnComponent sql_retriever = SQLRetriever(sql_database) def get_table_context_and_rows_str( query_str: str, table_schema_objs: List[SQLTableSchema] ): """Get table context string.""" context_strs = [] for table_schema_obj in table_schema_objs: table_info = sql_database.get_single_table_info( table_schema_obj.table_name ) if table_schema_obj.context_str: table_opt_context = " The table description is: " table_opt_context += table_schema_obj.context_str table_info += table_opt_context vector_retriever = vector_index_dict[ table_schema_obj.table_name ].as_retriever(similarity_top_k=2) relevant_nodes = vector_retriever.retrieve(query_str) if len(relevant_nodes) > 0: table_row_context = "\nHere are some relevant example rows (values in the same order as columns above)\n" for node in relevant_nodes: table_row_context += str(node.get_content()) + "\n" table_info += table_row_context context_strs.append(table_info) return "\n\n".join(context_strs) table_parser_component = FnComponent(fn=get_table_context_and_rows_str) from llama_index.core.query_pipeline import ( QueryPipeline as QP, Link, InputComponent, CustomQueryComponent, ) qp = QP( modules={ "input": InputComponent(), "table_retriever": obj_retriever, "table_output_parser": table_parser_component, "text2sql_prompt": text2sql_prompt, "text2sql_llm": llm, "sql_output_parser": sql_parser_component, "sql_retriever": sql_retriever, "response_synthesis_prompt": response_synthesis_prompt, "response_synthesis_llm": llm, }, verbose=True, ) qp.add_link("input", "table_retriever") qp.add_link("input", "table_output_parser", dest_key="query_str") qp.add_link( "table_retriever", "table_output_parser", dest_key="table_schema_objs" ) qp.add_link("input", "text2sql_prompt", dest_key="query_str") qp.add_link("table_output_parser", "text2sql_prompt", dest_key="schema") qp.add_chain( ["text2sql_prompt", "text2sql_llm", "sql_output_parser", "sql_retriever"] ) qp.add_link( "sql_output_parser", "response_synthesis_prompt", dest_key="sql_query" ) qp.add_link( "sql_retriever", "response_synthesis_prompt", dest_key="context_str" ) qp.add_link("input", "response_synthesis_prompt", dest_key="query_str") qp.add_link("response_synthesis_prompt", "response_synthesis_llm") from pyvis.network import Network net = Network(notebook=True, cdn_resources="in_line", directed=True) net.from_nx(qp.dag) net.show("text2sql_dag.html") response = qp.run( query="What was the year that The Notorious BIG was signed to Bad Boy?" ) print(str(response))
[ "llama_index.core.StorageContext.from_defaults", "llama_index.core.objects.ObjectIndex.from_objects", "llama_index.core.load_index_from_storage", "llama_index.core.objects.SQLTableSchema", "llama_index.core.query_pipeline.FnComponent", "llama_index.core.prompts.default_prompts.DEFAULT_TEXT_TO_SQL_PROMPT.partial_format", "llama_index.core.PromptTemplate", "llama_index.core.query_pipeline.InputComponent", "llama_index.core.VectorStoreIndex", "llama_index.core.bridge.pydantic.Field", "llama_index.core.objects.SQLTableNodeMapping", "llama_index.llms.openai.OpenAI", "llama_index.core.retrievers.SQLRetriever", "llama_index.core.SQLDatabase" ]
[((323, 363), 'pathlib.Path', 'Path', (['"""./WikiTableQuestions/csv/200-csv"""'], {}), "('./WikiTableQuestions/csv/200-csv')\n", (327, 363), False, 'from pathlib import Path\n'), ((3874, 3909), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///:memory:"""'], {}), "('sqlite:///:memory:')\n", (3887, 3909), False, 'from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer\n'), ((3925, 3935), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (3933, 3935), False, 'from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer\n'), ((4195, 4210), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (4208, 4210), True, 'import phoenix as px\n'), ((4446, 4465), 'llama_index.core.SQLDatabase', 'SQLDatabase', (['engine'], {}), '(engine)\n', (4457, 4465), False, 'from llama_index.core import SQLDatabase, VectorStoreIndex\n'), ((4488, 4521), 'llama_index.core.objects.SQLTableNodeMapping', 'SQLTableNodeMapping', (['sql_database'], {}), '(sql_database)\n', (4507, 4521), False, 'from llama_index.core.objects import SQLTableNodeMapping, ObjectIndex, SQLTableSchema\n'), ((4696, 4781), 'llama_index.core.objects.ObjectIndex.from_objects', 'ObjectIndex.from_objects', (['table_schema_objs', 'table_node_mapping', 'VectorStoreIndex'], {}), '(table_schema_objs, table_node_mapping,\n VectorStoreIndex)\n', (4720, 4781), False, 'from llama_index.core.objects import SQLTableNodeMapping, ObjectIndex, SQLTableSchema\n'), ((5005, 5031), 'llama_index.core.retrievers.SQLRetriever', 'SQLRetriever', (['sql_database'], {}), '(sql_database)\n', (5017, 5031), False, 'from llama_index.core.retrievers import SQLRetriever\n'), ((5628, 5665), 'llama_index.core.query_pipeline.FnComponent', 'FnComponent', ([], {'fn': 'get_table_context_str'}), '(fn=get_table_context_str)\n', (5639, 5665), False, 'from llama_index.core.query_pipeline import FnComponent\n'), ((6454, 6491), 'llama_index.core.query_pipeline.FnComponent', 'FnComponent', ([], {'fn': 'parse_response_to_sql'}), '(fn=parse_response_to_sql)\n', (6465, 6491), False, 'from llama_index.core.query_pipeline import FnComponent\n'), ((6511, 6581), 'llama_index.core.prompts.default_prompts.DEFAULT_TEXT_TO_SQL_PROMPT.partial_format', 'DEFAULT_TEXT_TO_SQL_PROMPT.partial_format', ([], {'dialect': 'engine.dialect.name'}), '(dialect=engine.dialect.name)\n', (6552, 6581), False, 'from llama_index.core.prompts.default_prompts import DEFAULT_TEXT_TO_SQL_PROMPT\n'), ((6871, 6916), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['response_synthesis_prompt_str'], {}), '(response_synthesis_prompt_str)\n', (6885, 6916), False, 'from llama_index.core import PromptTemplate\n'), ((6932, 6961), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (6938, 6961), False, 'from llama_index.llms.openai import OpenAI\n'), ((8202, 8264), 'pyvis.network.Network', 'Network', ([], {'notebook': '(True)', 'cdn_resources': '"""in_line"""', 'directed': '(True)'}), "(notebook=True, cdn_resources='in_line', directed=True)\n", (8209, 8264), False, 'from pyvis.network import Network\n'), ((10585, 10611), 'llama_index.core.retrievers.SQLRetriever', 'SQLRetriever', (['sql_database'], {}), '(sql_database)\n', (10597, 10611), False, 'from llama_index.core.retrievers import SQLRetriever\n'), ((11737, 11783), 'llama_index.core.query_pipeline.FnComponent', 'FnComponent', ([], {'fn': 'get_table_context_and_rows_str'}), '(fn=get_table_context_and_rows_str)\n', (11748, 11783), False, 'from 
llama_index.core.query_pipeline import FnComponent\n'), ((13153, 13215), 'pyvis.network.Network', 'Network', ([], {'notebook': '(True)', 'cdn_resources': '"""in_line"""', 'directed': '(True)'}), "(notebook=True, cdn_resources='in_line', directed=True)\n", (13160, 13215), False, 'from pyvis.network import Network\n'), ((1015, 1087), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""table name (must be underscores and NO spaces)"""'}), "(..., description='table name (must be underscores and NO spaces)')\n", (1020, 1087), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1127, 1196), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""short, concise summary/caption of the table"""'}), "(..., description='short, concise summary/caption of the table')\n", (1132, 1196), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3182, 3211), 're.sub', 're.sub', (['"""\\\\W+"""', '"""_"""', 'col_name'], {}), "('\\\\W+', '_', col_name)\n", (3188, 3211), False, 'import re\n'), ((3586, 3627), 'sqlalchemy.Table', 'Table', (['table_name', 'metadata_obj', '*columns'], {}), '(table_name, metadata_obj, *columns)\n', (3591, 3627), False, 'from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer\n'), ((4548, 4616), 'llama_index.core.objects.SQLTableSchema', 'SQLTableSchema', ([], {'table_name': 't.table_name', 'context_str': 't.table_summary'}), '(table_name=t.table_name, context_str=t.table_summary)\n', (4562, 4616), False, 'from llama_index.core.objects import SQLTableNodeMapping, ObjectIndex, SQLTableSchema\n'), ((520, 541), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (531, 541), True, 'import pandas as pd\n'), ((1634, 1663), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (1640, 1663), False, 'from llama_index.llms.openai import OpenAI\n'), ((3460, 3513), 'sqlalchemy.Column', 'Column', (['col', "(String if dtype == 'object' else Integer)"], {}), "(col, String if dtype == 'object' else Integer)\n", (3466, 3513), False, 'from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer\n'), ((9087, 9115), 'os.makedirs', 'os.makedirs', (['table_index_dir'], {}), '(table_index_dir)\n', (9098, 9115), False, 'import os\n'), ((1785, 1804), 'pathlib.Path', 'Path', (['tableinfo_dir'], {}), '(tableinfo_dir)\n', (1789, 1804), False, 'from pathlib import Path\n'), ((7135, 7151), 'llama_index.core.query_pipeline.InputComponent', 'InputComponent', ([], {}), '()\n', (7149, 7151), False, 'from llama_index.core.query_pipeline import QueryPipeline as QP, Link, InputComponent, CustomQueryComponent\n'), ((9308, 9357), 'os.path.exists', 'os.path.exists', (['f"""{table_index_dir}/{table_name}"""'], {}), "(f'{table_index_dir}/{table_name}')\n", (9322, 9357), False, 'import os\n'), ((9719, 9742), 'llama_index.core.VectorStoreIndex', 'VectorStoreIndex', (['nodes'], {}), '(nodes)\n', (9735, 9742), False, 'from llama_index.core import VectorStoreIndex, load_index_from_storage\n'), ((9912, 9987), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'f"""{table_index_dir}/{table_name}"""'}), "(persist_dir=f'{table_index_dir}/{table_name}')\n", (9940, 9987), False, 'from llama_index.core import StorageContext\n'), ((10038, 10103), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'index_id': '"""vector_index"""'}), 
"(storage_context, index_id='vector_index')\n", (10061, 10103), False, 'from llama_index.core import VectorStoreIndex, load_index_from_storage\n'), ((11957, 11973), 'llama_index.core.query_pipeline.InputComponent', 'InputComponent', ([], {}), '()\n', (11971, 11973), False, 'from llama_index.core.query_pipeline import QueryPipeline as QP, Link, InputComponent, CustomQueryComponent\n'), ((9047, 9068), 'pathlib.Path', 'Path', (['table_index_dir'], {}), '(table_index_dir)\n', (9051, 9068), False, 'from pathlib import Path\n'), ((9440, 9477), 'sqlalchemy.text', 'text', (['f"""SELECT * FROM "{table_name}\\""""'], {}), '(f\'SELECT * FROM "{table_name}"\')\n', (9444, 9477), False, 'from sqlalchemy import text\n')]
from llama_index.core import SQLDatabase from sqlalchemy import ( create_engine, MetaData, Table, Column, String, Integer, select, column, ) engine = create_engine("sqlite:///chinook.db") sql_database = SQLDatabase(engine) from llama_index.core.query_pipeline import QueryPipeline get_ipython().run_line_magic('pip', 'install llama-index-llms-openai') get_ipython().system('curl "https://www.sqlitetutorial.net/wp-content/uploads/2018/03/chinook.zip" -O ./chinook.zip') get_ipython().system('unzip ./chinook.zip') from llama_index.core.settings import Settings from llama_index.core.callbacks import CallbackManager callback_manager = CallbackManager() Settings.callback_manager = callback_manager import phoenix as px import llama_index.core px.launch_app() llama_index.core.set_global_handler("arize_phoenix") from llama_index.core.query_engine import NLSQLTableQueryEngine from llama_index.core.tools import QueryEngineTool sql_query_engine = NLSQLTableQueryEngine( sql_database=sql_database, tables=["albums", "tracks", "artists"], verbose=True, ) sql_tool = QueryEngineTool.from_defaults( query_engine=sql_query_engine, name="sql_tool", description=( "Useful for translating a natural language query into a SQL query" ), ) from llama_index.core.query_pipeline import QueryPipeline as QP qp = QP(verbose=True) from llama_index.core.agent.react.types import ( ActionReasoningStep, ObservationReasoningStep, ResponseReasoningStep, ) from llama_index.core.agent import Task, AgentChatResponse from llama_index.core.query_pipeline import ( AgentInputComponent, AgentFnComponent, CustomAgentComponent, QueryComponent, ToolRunnerComponent, ) from llama_index.core.llms import MessageRole from typing import Dict, Any, Optional, Tuple, List, cast def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict[str, Any]: """Agent input function. Returns: A Dictionary of output keys and values. If you are specifying src_key when defining links between this component and other components, make sure the src_key matches the specified output_key. 
""" if "current_reasoning" not in state: state["current_reasoning"] = [] reasoning_step = ObservationReasoningStep(observation=task.input) state["current_reasoning"].append(reasoning_step) return {"input": task.input} agent_input_component = AgentInputComponent(fn=agent_input_fn) from llama_index.core.agent import ReActChatFormatter from llama_index.core.query_pipeline import InputComponent, Link from llama_index.core.llms import ChatMessage from llama_index.core.tools import BaseTool def react_prompt_fn( task: Task, state: Dict[str, Any], input: str, tools: List[BaseTool] ) -> List[ChatMessage]: chat_formatter = ReActChatFormatter() return chat_formatter.format( tools, chat_history=task.memory.get() + state["memory"].get_all(), current_reasoning=state["current_reasoning"], ) react_prompt_component = AgentFnComponent( fn=react_prompt_fn, partial_dict={"tools": [sql_tool]} ) from typing import Set, Optional from llama_index.core.agent.react.output_parser import ReActOutputParser from llama_index.core.llms import ChatResponse from llama_index.core.agent.types import Task def parse_react_output_fn( task: Task, state: Dict[str, Any], chat_response: ChatResponse ): """Parse ReAct output into a reasoning step.""" output_parser = ReActOutputParser() reasoning_step = output_parser.parse(chat_response.message.content) return {"done": reasoning_step.is_done, "reasoning_step": reasoning_step} parse_react_output = AgentFnComponent(fn=parse_react_output_fn) def run_tool_fn( task: Task, state: Dict[str, Any], reasoning_step: ActionReasoningStep ): """Run tool and process tool output.""" tool_runner_component = ToolRunnerComponent( [sql_tool], callback_manager=task.callback_manager ) tool_output = tool_runner_component.run_component( tool_name=reasoning_step.action, tool_input=reasoning_step.action_input, ) observation_step = ObservationReasoningStep(observation=str(tool_output)) state["current_reasoning"].append(observation_step) return {"response_str": observation_step.get_content(), "is_done": False} run_tool = AgentFnComponent(fn=run_tool_fn) def process_response_fn( task: Task, state: Dict[str, Any], response_step: ResponseReasoningStep ): """Process response.""" state["current_reasoning"].append(response_step) response_str = response_step.response state["memory"].put(ChatMessage(content=task.input, role=MessageRole.USER)) state["memory"].put( ChatMessage(content=response_str, role=MessageRole.ASSISTANT) ) return {"response_str": response_str, "is_done": True} process_response = AgentFnComponent(fn=process_response_fn) def process_agent_response_fn( task: Task, state: Dict[str, Any], response_dict: dict ): """Process agent response.""" return ( AgentChatResponse(response_dict["response_str"]), response_dict["is_done"], ) process_agent_response = AgentFnComponent(fn=process_agent_response_fn) from llama_index.core.query_pipeline import QueryPipeline as QP from llama_index.llms.openai import OpenAI qp.add_modules( { "agent_input": agent_input_component, "react_prompt": react_prompt_component, "llm": OpenAI(model="gpt-4-1106-preview"), "react_output_parser": parse_react_output, "run_tool": run_tool, "process_response": process_response, "process_agent_response": process_agent_response, } ) qp.add_chain(["agent_input", "react_prompt", "llm", "react_output_parser"]) qp.add_link( "react_output_parser", "run_tool", condition_fn=lambda x: not x["done"], input_fn=lambda x: x["reasoning_step"], ) qp.add_link( "react_output_parser", "process_response", condition_fn=lambda x: x["done"], input_fn=lambda x: 
x["reasoning_step"], ) qp.add_link("process_response", "process_agent_response") qp.add_link("run_tool", "process_agent_response") from pyvis.network import Network net = Network(notebook=True, cdn_resources="in_line", directed=True) net.from_nx(qp.clean_dag) net.show("agent_dag.html") from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner from llama_index.core.callbacks import CallbackManager agent_worker = QueryPipelineAgentWorker(qp) agent = AgentRunner( agent_worker, callback_manager=CallbackManager([]), verbose=True ) task = agent.create_task( "What are some tracks from the artist AC/DC? Limit it to 3" ) step_output = agent.run_step(task.task_id) step_output = agent.run_step(task.task_id) step_output.is_last response = agent.finalize_response(task.task_id) print(str(response)) agent.reset() response = agent.chat( "What are some tracks from the artist AC/DC? Limit it to 3" ) print(str(response)) from llama_index.llms.openai import OpenAI llm = OpenAI(model="gpt-4-1106-preview") from llama_index.core.agent import Task, AgentChatResponse from typing import Dict, Any from llama_index.core.query_pipeline import ( AgentInputComponent, AgentFnComponent, ) def agent_input_fn(task: Task, state: Dict[str, Any]) -> Dict: """Agent input function.""" if "convo_history" not in state: state["convo_history"] = [] state["count"] = 0 state["convo_history"].append(f"User: {task.input}") convo_history_str = "\n".join(state["convo_history"]) or "None" return {"input": task.input, "convo_history": convo_history_str} agent_input_component = AgentInputComponent(fn=agent_input_fn) from llama_index.core import PromptTemplate retry_prompt_str = """\ You are trying to generate a proper natural language query given a user input. This query will then be interpreted by a downstream text-to-SQL agent which will convert the query to a SQL statement. If the agent triggers an error, then that will be reflected in the current conversation history (see below). If the conversation history is None, use the user input. If its not None, generate a new SQL query that avoids the problems of the previous SQL query. Input: {input} Convo history (failed attempts): {convo_history} New input: """ retry_prompt = PromptTemplate(retry_prompt_str) from llama_index.core import Response from typing import Tuple validate_prompt_str = """\ Given the user query, validate whether the inferred SQL query and response from executing the query is correct and answers the query. Answer with YES or NO. 
Query: {input} Inferred SQL query: {sql_query} SQL Response: {sql_response} Result: """ validate_prompt = PromptTemplate(validate_prompt_str) MAX_ITER = 3 def agent_output_fn( task: Task, state: Dict[str, Any], output: Response ) -> Tuple[AgentChatResponse, bool]: """Agent output component.""" print(f"> Inferred SQL Query: {output.metadata['sql_query']}") print(f"> SQL Response: {str(output)}") state["convo_history"].append( f"Assistant (inferred SQL query): {output.metadata['sql_query']}" ) state["convo_history"].append(f"Assistant (response): {str(output)}") validate_prompt_partial = validate_prompt.as_query_component( partial={ "sql_query": output.metadata["sql_query"], "sql_response": str(output), } ) qp = QP(chain=[validate_prompt_partial, llm]) validate_output = qp.run(input=task.input) state["count"] += 1 is_done = False if state["count"] >= MAX_ITER: is_done = True if "YES" in validate_output.message.content: is_done = True return AgentChatResponse(response=str(output)), is_done agent_output_component = AgentFnComponent(fn=agent_output_fn) from llama_index.core.query_pipeline import ( QueryPipeline as QP, Link, InputComponent, ) qp = QP( modules={ "input": agent_input_component, "retry_prompt": retry_prompt, "llm": llm, "sql_query_engine": sql_query_engine, "output_component": agent_output_component, }, verbose=True, ) qp.add_link("input", "retry_prompt", src_key="input", dest_key="input") qp.add_link( "input", "retry_prompt", src_key="convo_history", dest_key="convo_history" ) qp.add_chain(["retry_prompt", "llm", "sql_query_engine", "output_component"]) from pyvis.network import Network net = Network(notebook=True, cdn_resources="in_line", directed=True) net.from_nx(qp.dag) net.show("agent_dag.html") from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner from llama_index.core.callbacks import CallbackManager agent_worker = QueryPipelineAgentWorker(qp) agent = AgentRunner( agent_worker, callback_manager=CallbackManager(), verbose=False ) response = agent.chat( "How many albums did the artist who wrote 'Restless and Wild' release? (answer should be non-zero)?" ) print(str(response))
[ "llama_index.core.query_engine.NLSQLTableQueryEngine", "llama_index.core.callbacks.CallbackManager", "llama_index.core.agent.QueryPipelineAgentWorker", "llama_index.core.agent.react.output_parser.ReActOutputParser", "llama_index.core.query_pipeline.ToolRunnerComponent", "llama_index.core.PromptTemplate", "llama_index.core.query_pipeline.QueryPipeline", "llama_index.core.agent.AgentChatResponse", "llama_index.core.agent.react.types.ObservationReasoningStep", "llama_index.core.agent.ReActChatFormatter", "llama_index.core.query_pipeline.AgentFnComponent", "llama_index.llms.openai.OpenAI", "llama_index.core.llms.ChatMessage", "llama_index.core.query_pipeline.AgentInputComponent", "llama_index.core.tools.QueryEngineTool.from_defaults", "llama_index.core.SQLDatabase" ]
[((183, 220), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///chinook.db"""'], {}), "('sqlite:///chinook.db')\n", (196, 220), False, 'from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer, select, column\n'), ((236, 255), 'llama_index.core.SQLDatabase', 'SQLDatabase', (['engine'], {}), '(engine)\n', (247, 255), False, 'from llama_index.core import SQLDatabase\n'), ((679, 696), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', ([], {}), '()\n', (694, 696), False, 'from llama_index.core.callbacks import CallbackManager\n'), ((790, 805), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (803, 805), True, 'import phoenix as px\n'), ((997, 1103), 'llama_index.core.query_engine.NLSQLTableQueryEngine', 'NLSQLTableQueryEngine', ([], {'sql_database': 'sql_database', 'tables': "['albums', 'tracks', 'artists']", 'verbose': '(True)'}), "(sql_database=sql_database, tables=['albums', 'tracks',\n 'artists'], verbose=True)\n", (1018, 1103), False, 'from llama_index.core.query_engine import NLSQLTableQueryEngine\n'), ((1126, 1293), 'llama_index.core.tools.QueryEngineTool.from_defaults', 'QueryEngineTool.from_defaults', ([], {'query_engine': 'sql_query_engine', 'name': '"""sql_tool"""', 'description': '"""Useful for translating a natural language query into a SQL query"""'}), "(query_engine=sql_query_engine, name=\n 'sql_tool', description=\n 'Useful for translating a natural language query into a SQL query')\n", (1155, 1293), False, 'from llama_index.core.tools import QueryEngineTool\n'), ((1388, 1404), 'llama_index.core.query_pipeline.QueryPipeline', 'QP', ([], {'verbose': '(True)'}), '(verbose=True)\n', (1390, 1404), True, 'from llama_index.core.query_pipeline import QueryPipeline as QP, Link, InputComponent\n'), ((2476, 2514), 'llama_index.core.query_pipeline.AgentInputComponent', 'AgentInputComponent', ([], {'fn': 'agent_input_fn'}), '(fn=agent_input_fn)\n', (2495, 2514), False, 'from llama_index.core.query_pipeline import AgentInputComponent, AgentFnComponent\n'), ((3093, 3165), 'llama_index.core.query_pipeline.AgentFnComponent', 'AgentFnComponent', ([], {'fn': 'react_prompt_fn', 'partial_dict': "{'tools': [sql_tool]}"}), "(fn=react_prompt_fn, partial_dict={'tools': [sql_tool]})\n", (3109, 3165), False, 'from llama_index.core.query_pipeline import AgentInputComponent, AgentFnComponent\n'), ((3738, 3780), 'llama_index.core.query_pipeline.AgentFnComponent', 'AgentFnComponent', ([], {'fn': 'parse_react_output_fn'}), '(fn=parse_react_output_fn)\n', (3754, 3780), False, 'from llama_index.core.query_pipeline import AgentInputComponent, AgentFnComponent\n'), ((4412, 4444), 'llama_index.core.query_pipeline.AgentFnComponent', 'AgentFnComponent', ([], {'fn': 'run_tool_fn'}), '(fn=run_tool_fn)\n', (4428, 4444), False, 'from llama_index.core.query_pipeline import AgentInputComponent, AgentFnComponent\n'), ((4936, 4976), 'llama_index.core.query_pipeline.AgentFnComponent', 'AgentFnComponent', ([], {'fn': 'process_response_fn'}), '(fn=process_response_fn)\n', (4952, 4976), False, 'from llama_index.core.query_pipeline import AgentInputComponent, AgentFnComponent\n'), ((5244, 5290), 'llama_index.core.query_pipeline.AgentFnComponent', 'AgentFnComponent', ([], {'fn': 'process_agent_response_fn'}), '(fn=process_agent_response_fn)\n', (5260, 5290), False, 'from llama_index.core.query_pipeline import AgentInputComponent, AgentFnComponent\n'), ((6286, 6348), 'pyvis.network.Network', 'Network', ([], {'notebook': '(True)', 'cdn_resources': '"""in_line"""', 
'directed': '(True)'}), "(notebook=True, cdn_resources='in_line', directed=True)\n", (6293, 6348), False, 'from pyvis.network import Network\n'), ((6549, 6577), 'llama_index.core.agent.QueryPipelineAgentWorker', 'QueryPipelineAgentWorker', (['qp'], {}), '(qp)\n', (6573, 6577), False, 'from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner\n'), ((7133, 7167), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-preview"""'}), "(model='gpt-4-1106-preview')\n", (7139, 7167), False, 'from llama_index.llms.openai import OpenAI\n'), ((7770, 7808), 'llama_index.core.query_pipeline.AgentInputComponent', 'AgentInputComponent', ([], {'fn': 'agent_input_fn'}), '(fn=agent_input_fn)\n', (7789, 7808), False, 'from llama_index.core.query_pipeline import AgentInputComponent, AgentFnComponent\n'), ((8437, 8469), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['retry_prompt_str'], {}), '(retry_prompt_str)\n', (8451, 8469), False, 'from llama_index.core import PromptTemplate\n'), ((8829, 8864), 'llama_index.core.PromptTemplate', 'PromptTemplate', (['validate_prompt_str'], {}), '(validate_prompt_str)\n', (8843, 8864), False, 'from llama_index.core import PromptTemplate\n'), ((9886, 9922), 'llama_index.core.query_pipeline.AgentFnComponent', 'AgentFnComponent', ([], {'fn': 'agent_output_fn'}), '(fn=agent_output_fn)\n', (9902, 9922), False, 'from llama_index.core.query_pipeline import AgentInputComponent, AgentFnComponent\n'), ((10034, 10224), 'llama_index.core.query_pipeline.QueryPipeline', 'QP', ([], {'modules': "{'input': agent_input_component, 'retry_prompt': retry_prompt, 'llm': llm,\n 'sql_query_engine': sql_query_engine, 'output_component':\n agent_output_component}", 'verbose': '(True)'}), "(modules={'input': agent_input_component, 'retry_prompt': retry_prompt,\n 'llm': llm, 'sql_query_engine': sql_query_engine, 'output_component':\n agent_output_component}, verbose=True)\n", (10036, 10224), True, 'from llama_index.core.query_pipeline import QueryPipeline as QP, Link, InputComponent\n'), ((10563, 10625), 'pyvis.network.Network', 'Network', ([], {'notebook': '(True)', 'cdn_resources': '"""in_line"""', 'directed': '(True)'}), "(notebook=True, cdn_resources='in_line', directed=True)\n", (10570, 10625), False, 'from pyvis.network import Network\n'), ((10820, 10848), 'llama_index.core.agent.QueryPipelineAgentWorker', 'QueryPipelineAgentWorker', (['qp'], {}), '(qp)\n', (10844, 10848), False, 'from llama_index.core.agent import QueryPipelineAgentWorker, AgentRunner\n'), ((2314, 2362), 'llama_index.core.agent.react.types.ObservationReasoningStep', 'ObservationReasoningStep', ([], {'observation': 'task.input'}), '(observation=task.input)\n', (2338, 2362), False, 'from llama_index.core.agent.react.types import ActionReasoningStep, ObservationReasoningStep, ResponseReasoningStep\n'), ((2868, 2888), 'llama_index.core.agent.ReActChatFormatter', 'ReActChatFormatter', ([], {}), '()\n', (2886, 2888), False, 'from llama_index.core.agent import ReActChatFormatter\n'), ((3545, 3564), 'llama_index.core.agent.react.output_parser.ReActOutputParser', 'ReActOutputParser', ([], {}), '()\n', (3562, 3564), False, 'from llama_index.core.agent.react.output_parser import ReActOutputParser\n'), ((3950, 4021), 'llama_index.core.query_pipeline.ToolRunnerComponent', 'ToolRunnerComponent', (['[sql_tool]'], {'callback_manager': 'task.callback_manager'}), '([sql_tool], callback_manager=task.callback_manager)\n', (3969, 4021), False, 'from llama_index.core.query_pipeline import 
AgentInputComponent, AgentFnComponent, CustomAgentComponent, QueryComponent, ToolRunnerComponent\n'), ((9535, 9575), 'llama_index.core.query_pipeline.QueryPipeline', 'QP', ([], {'chain': '[validate_prompt_partial, llm]'}), '(chain=[validate_prompt_partial, llm])\n', (9537, 9575), True, 'from llama_index.core.query_pipeline import QueryPipeline as QP, Link, InputComponent\n'), ((4698, 4752), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'content': 'task.input', 'role': 'MessageRole.USER'}), '(content=task.input, role=MessageRole.USER)\n', (4709, 4752), False, 'from llama_index.core.llms import ChatMessage\n'), ((4787, 4848), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'content': 'response_str', 'role': 'MessageRole.ASSISTANT'}), '(content=response_str, role=MessageRole.ASSISTANT)\n', (4798, 4848), False, 'from llama_index.core.llms import ChatMessage\n'), ((5127, 5175), 'llama_index.core.agent.AgentChatResponse', 'AgentChatResponse', (["response_dict['response_str']"], {}), "(response_dict['response_str'])\n", (5144, 5175), False, 'from llama_index.core.agent import Task, AgentChatResponse\n'), ((5533, 5567), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-preview"""'}), "(model='gpt-4-1106-preview')\n", (5539, 5567), False, 'from llama_index.llms.openai import OpenAI\n'), ((6634, 6653), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6649, 6653), False, 'from llama_index.core.callbacks import CallbackManager\n'), ((10905, 10922), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', ([], {}), '()\n', (10920, 10922), False, 'from llama_index.core.callbacks import CallbackManager\n')]
import requests import pandas as pd from bs4 import BeautifulSoup import os from llama_index import SimpleDirectoryReader,GPTListIndex,GPTVectorStoreIndex,LLMPredictor,PromptHelper,ServiceContext,StorageContext from langchain import OpenAI import openai import llama_index # from main import secret_key # with open('key.txt','r') as f: # secret_key=f.read().strip() # os.environ["OPENAI_API_KEY"]=secret_key # secret_key = os.getenv('api_key') from langchain import OpenAI Base_Dir=os.getcwd() from PyPDF2 import PdfReader,PdfWriter def api_status(key): # Set your OpenAI API key # os.environ('OPENAI_API') # openai.api_key="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" openai.api_key=key # Try to create a completion try: response = openai.Completion.create( engine="text-davinci-001", prompt="What is the meaning of life?", temperature=0.5, max_tokens=60, top_p=0.3, frequency_penalty=0.5, presence_penalty=0.0, ) except openai.OpenAIError as e: return False else: return True def get_chat_response(question,api_key): # API endpoint url = 'https://api.openai.com/v1/chat/completions' # Your OpenAI API key # api_key = secret_key # Request headers headers = { 'Content-Type': 'application/json', 'Authorization': f'Bearer {api_key}' } # Request payload payload = { 'model': 'gpt-3.5-turbo', 'messages': [{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': question}] } # Send POST request to the API response = requests.post(url, headers=headers, json=payload) # Parse the response data = response.json() # return data try: reply= data['choices'][0]['message']['content'] # Get the model's reply # reply = data['choices'][0]['message']['content'] return reply except Exception as e: print(e) return None def company_with_url(company_name): csv_path=os.path.join(Base_Dir,'companies_data.csv') from sentence_transformers import SentenceTransformer,util encode_model = SentenceTransformer('paraphrase-MiniLM-L6-v2') df=pd.read_csv(csv_path) companies=list(df['company']) companies_urls=list(df['screener url']) encoded_names=encode_model.encode(companies) cos=util.cos_sim(encode_model.encode(company_name.split()[0]),encoded_names) similar=list(map(lambda x:x.items,cos[0])) index=similar.index(max(similar)) # m=0 # index=0 # for i in range(len(cos[0])): # if m<cos[0][i].item(): # index=i # m=cos[0][i] company=companies[index] screener_url=companies_urls[index] return (company,screener_url) def report_url(url): soup_annual=BeautifulSoup(requests.get(url).content,'html.parser') annual_urls=[i.get('href') for i in soup_annual.find_all('a')] annual_reports=[] for i in annual_urls: if 'Annual' in i and '#' not in i: annual_reports.append(i) annual_report_2022=annual_reports[0] return annual_report_2022 def autodownload_report(url,company): headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.162 Safari/537.36' } response = requests.get(url, stream=True,headers=headers) folder_path=os.path.join(Base_Dir,f'Annual_reports/{company}_report') if not os.path.exists(folder_path): os.mkdir(folder_path) print('folder created') pdf_path=os.path.join(Base_Dir,f'{company}_2022.pdf') # print(pdf_path) with open(pdf_path, "wb") as f: for chunk in response.iter_content(chunk_size=1024): f.write(chunk) return def pdf2txt(pdf_path,company): if not os.path.exists(os.path.join(Base_Dir,f'Annual_reports/{company}_report/{company}_2022.txt')): titles = ['STANDALONE BALANCE SHEET', 'STANDALONE STATEMENT OF PROFIT AND LOSS', 'Balance 
Sheet', 'Balance Sheet (contd.)', 'Statement of Profit and Loss', 'Statement of Profit and Loss (contd.)'] with open(pdf_path, 'rb') as pdf_file: # Create a PDF reader object pdf_reader = PdfReader(pdf_file) text='' pdf_writer = PdfWriter() page_no=0 for page in pdf_reader.pages: page_content=page.extract_text() page_no+=1 for word in titles: if word in page_content: # print(page_no) text+=page.extract_text() pdf_writer.add_page(page) with open(f'{company}_imp.pdf', 'wb') as output_file: pdf_writer.write(output_file) txt_path=os.path.join(Base_Dir,f'Annual_reports/{company}_report/{company}_2022.txt') with open(txt_path,'w',encoding='utf-8') as f: f.write(text) print('created txt file') pdf_path=os.path.join(Base_Dir,f'{company}_2022.pdf') os.remove(pdf_path) print('removed pdf file') return import base64 def display_pdf(pdf_file): with open(pdf_file, "rb") as f: base64_pdf = base64.b64encode(f.read()).decode('utf-8') pdf_display = f'<iframe src="data:application/pdf;base64,{base64_pdf}" width="700" height="500" type="application/pdf"></iframe>' return pdf_display def create_index(company,secret_key): import openai vstore_path=os.path.join(Base_Dir,f'vector_stores/{company}_vstore') doc_path=os.path.join(Base_Dir,f'Annual_reports/{company}_report') if not os.path.exists(vstore_path): os.mkdir(vstore_path) max_input=4096 tokens=200 chunk_size=600 max_chunk_overlap=20 promptHelpter=PromptHelper(max_input,max_chunk_overlap,chunk_size_limit=chunk_size) openai.api_key=secret_key llmPredictor=LLMPredictor(llm=OpenAI(temperature=0,model_name='text-ada-001',max_tokens=tokens)) docs=SimpleDirectoryReader(doc_path).load_data() service_context=ServiceContext.from_defaults(llm_predictor=llmPredictor,prompt_helper=promptHelpter) openai.api_key=secret_key vectorIndex=GPTVectorStoreIndex.from_documents(documents=docs) vectorIndex.storage_context.persist(persist_dir=vstore_path) return def load_index(vstore_path): # rebuild storage context storage_context = StorageContext.from_defaults(persist_dir=vstore_path) # load index index = llama_index.load_index_from_storage(storage_context) return index # print(index) def give_answer(index,que): return index.as_query_engine().query(que) def answerMe(question,company): vstore_path=os.path.join(Base_Dir,f'vector_stores/{company}_vstore') storage_context=StorageContext.from_defaults(persist_dir=vstore_path) # index=load_index_from_storage(storage_context) index=llama_index.load_index_from_storage(storage_context) query_engine=index.as_query_engine() response=query_engine.query(question) return response.response def balance(url): dfs = pd.read_html(url) return dfs[6] def shareholding(url): dfs = pd.read_html(url) return dfs[10] def balance(url): dfs = pd.read_html(url) return dfs[6]
[ "llama_index.GPTVectorStoreIndex.from_documents", "llama_index.ServiceContext.from_defaults", "llama_index.StorageContext.from_defaults", "llama_index.SimpleDirectoryReader", "llama_index.load_index_from_storage", "llama_index.PromptHelper" ]
[((490, 501), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (499, 501), False, 'import os\n'), ((1741, 1790), 'requests.post', 'requests.post', (['url'], {'headers': 'headers', 'json': 'payload'}), '(url, headers=headers, json=payload)\n', (1754, 1790), False, 'import requests\n'), ((2167, 2211), 'os.path.join', 'os.path.join', (['Base_Dir', '"""companies_data.csv"""'], {}), "(Base_Dir, 'companies_data.csv')\n", (2179, 2211), False, 'import os\n'), ((2293, 2339), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""paraphrase-MiniLM-L6-v2"""'], {}), "('paraphrase-MiniLM-L6-v2')\n", (2312, 2339), False, 'from sentence_transformers import SentenceTransformer, util\n'), ((2347, 2368), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (2358, 2368), True, 'import pandas as pd\n'), ((3497, 3544), 'requests.get', 'requests.get', (['url'], {'stream': '(True)', 'headers': 'headers'}), '(url, stream=True, headers=headers)\n', (3509, 3544), False, 'import requests\n'), ((3561, 3619), 'os.path.join', 'os.path.join', (['Base_Dir', 'f"""Annual_reports/{company}_report"""'], {}), "(Base_Dir, f'Annual_reports/{company}_report')\n", (3573, 3619), False, 'import os\n'), ((5768, 5825), 'os.path.join', 'os.path.join', (['Base_Dir', 'f"""vector_stores/{company}_vstore"""'], {}), "(Base_Dir, f'vector_stores/{company}_vstore')\n", (5780, 5825), False, 'import os\n'), ((5838, 5896), 'os.path.join', 'os.path.join', (['Base_Dir', 'f"""Annual_reports/{company}_report"""'], {}), "(Base_Dir, f'Annual_reports/{company}_report')\n", (5850, 5896), False, 'import os\n'), ((6782, 6835), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'vstore_path'}), '(persist_dir=vstore_path)\n', (6810, 6835), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext, StorageContext\n'), ((6865, 6917), 'llama_index.load_index_from_storage', 'llama_index.load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (6900, 6917), False, 'import llama_index\n'), ((7079, 7136), 'os.path.join', 'os.path.join', (['Base_Dir', 'f"""vector_stores/{company}_vstore"""'], {}), "(Base_Dir, f'vector_stores/{company}_vstore')\n", (7091, 7136), False, 'import os\n'), ((7156, 7209), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'vstore_path'}), '(persist_dir=vstore_path)\n', (7184, 7209), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext, StorageContext\n'), ((7273, 7325), 'llama_index.load_index_from_storage', 'llama_index.load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (7308, 7325), False, 'import llama_index\n'), ((7473, 7490), 'pandas.read_html', 'pd.read_html', (['url'], {}), '(url)\n', (7485, 7490), True, 'import pandas as pd\n'), ((7543, 7560), 'pandas.read_html', 'pd.read_html', (['url'], {}), '(url)\n', (7555, 7560), True, 'import pandas as pd\n'), ((7609, 7626), 'pandas.read_html', 'pd.read_html', (['url'], {}), '(url)\n', (7621, 7626), True, 'import pandas as pd\n'), ((782, 970), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': '"""text-davinci-001"""', 'prompt': '"""What is the meaning of life?"""', 'temperature': '(0.5)', 'max_tokens': '(60)', 'top_p': '(0.3)', 'frequency_penalty': '(0.5)', 'presence_penalty': '(0.0)'}), "(engine='text-davinci-001', prompt=\n 'What is the meaning of 
life?', temperature=0.5, max_tokens=60, top_p=\n 0.3, frequency_penalty=0.5, presence_penalty=0.0)\n", (806, 970), False, 'import openai\n'), ((3631, 3658), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (3645, 3658), False, 'import os\n'), ((3668, 3689), 'os.mkdir', 'os.mkdir', (['folder_path'], {}), '(folder_path)\n', (3676, 3689), False, 'import os\n'), ((3739, 3784), 'os.path.join', 'os.path.join', (['Base_Dir', 'f"""{company}_2022.pdf"""'], {}), "(Base_Dir, f'{company}_2022.pdf')\n", (3751, 3784), False, 'import os\n'), ((5053, 5130), 'os.path.join', 'os.path.join', (['Base_Dir', 'f"""Annual_reports/{company}_report/{company}_2022.txt"""'], {}), "(Base_Dir, f'Annual_reports/{company}_report/{company}_2022.txt')\n", (5065, 5130), False, 'import os\n'), ((5262, 5307), 'os.path.join', 'os.path.join', (['Base_Dir', 'f"""{company}_2022.pdf"""'], {}), "(Base_Dir, f'{company}_2022.pdf')\n", (5274, 5307), False, 'import os\n'), ((5315, 5334), 'os.remove', 'os.remove', (['pdf_path'], {}), '(pdf_path)\n', (5324, 5334), False, 'import os\n'), ((5907, 5934), 'os.path.exists', 'os.path.exists', (['vstore_path'], {}), '(vstore_path)\n', (5921, 5934), False, 'import os\n'), ((5945, 5966), 'os.mkdir', 'os.mkdir', (['vstore_path'], {}), '(vstore_path)\n', (5953, 5966), False, 'import os\n'), ((6092, 6163), 'llama_index.PromptHelper', 'PromptHelper', (['max_input', 'max_chunk_overlap'], {'chunk_size_limit': 'chunk_size'}), '(max_input, max_chunk_overlap, chunk_size_limit=chunk_size)\n', (6104, 6163), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext, StorageContext\n'), ((6409, 6499), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llmPredictor', 'prompt_helper': 'promptHelpter'}), '(llm_predictor=llmPredictor, prompt_helper=\n promptHelpter)\n', (6437, 6499), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext, StorageContext\n'), ((6548, 6598), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', ([], {'documents': 'docs'}), '(documents=docs)\n', (6582, 6598), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, PromptHelper, ServiceContext, StorageContext\n'), ((2964, 2981), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2976, 2981), False, 'import requests\n'), ((4028, 4105), 'os.path.join', 'os.path.join', (['Base_Dir', 'f"""Annual_reports/{company}_report/{company}_2022.txt"""'], {}), "(Base_Dir, f'Annual_reports/{company}_report/{company}_2022.txt')\n", (4040, 4105), False, 'import os\n'), ((4437, 4456), 'PyPDF2.PdfReader', 'PdfReader', (['pdf_file'], {}), '(pdf_file)\n', (4446, 4456), False, 'from PyPDF2 import PdfReader, PdfWriter\n'), ((4510, 4521), 'PyPDF2.PdfWriter', 'PdfWriter', ([], {}), '()\n', (4519, 4521), False, 'from PyPDF2 import PdfReader, PdfWriter\n'), ((6243, 6310), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""text-ada-001"""', 'max_tokens': 'tokens'}), "(temperature=0, model_name='text-ada-001', max_tokens=tokens)\n", (6249, 6310), False, 'from langchain import OpenAI\n'), ((6332, 6363), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['doc_path'], {}), '(doc_path)\n', (6353, 6363), False, 'from llama_index import SimpleDirectoryReader, GPTListIndex, GPTVectorStoreIndex, LLMPredictor, 
PromptHelper, ServiceContext, StorageContext\n')]
"""FastAPI app creation, logger configuration and main API routes.""" from typing import Any import llama_index from fastapi import FastAPI from fastapi.openapi.utils import get_openapi from private_gpt.paths import docs_path from private_gpt.server.chat.chat_router import chat_router from private_gpt.server.chunks.chunks_router import chunks_router from private_gpt.server.completions.completions_router import completions_router from private_gpt.server.embeddings.embeddings_router import embeddings_router from private_gpt.server.health.health_router import health_router from private_gpt.server.ingest.ingest_router import ingest_router from private_gpt.settings.settings import settings # Add LlamaIndex simple observability llama_index.set_global_handler("simple") # Start the API with open(docs_path / "description.md") as description_file: description = description_file.read() tags_metadata = [ { "name": "Ingestion", "description": "High-level APIs covering document ingestion -internally " "managing document parsing, splitting," "metadata extraction, embedding generation and storage- and ingested " "documents CRUD." "Each ingested document is identified by an ID that can be used to filter the " "context" "used in *Contextual Completions* and *Context Chunks* APIs.", }, { "name": "Contextual Completions", "description": "High-level APIs covering contextual Chat and Completions. They " "follow OpenAI's format, extending it to " "allow using the context coming from ingested documents to create the " "response. Internally" "manage context retrieval, prompt engineering and the response generation.", }, { "name": "Context Chunks", "description": "Low-level API that given a query return relevant chunks of " "text coming from the ingested" "documents.", }, { "name": "Embeddings", "description": "Low-level API to obtain the vector representation of a given " "text, using an Embeddings model." "Follows OpenAI's embeddings API format.", }, { "name": "Health", "description": "Simple health API to make sure the server is up and running.", }, ] app = FastAPI() def custom_openapi() -> dict[str, Any]: if app.openapi_schema: return app.openapi_schema openapi_schema = get_openapi( title="PrivateGPT", description=description, version="0.1.0", summary="PrivateGPT is a production-ready AI project that allows you to " "ask questions to your documents using the power of Large Language " "Models (LLMs), even in scenarios without Internet connection. " "100% private, no data leaves your execution environment at any point.", contact={ "url": "https://github.com/imartinez/privateGPT", }, license_info={ "name": "Apache 2.0", "url": "https://www.apache.org/licenses/LICENSE-2.0.html", }, routes=app.routes, tags=tags_metadata, ) openapi_schema["info"]["x-logo"] = { "url": "https://lh3.googleusercontent.com/drive-viewer" "/AK7aPaD_iNlMoTquOBsw4boh4tIYxyEuhz6EtEs8nzq3yNkNAK00xGj" "E1KUCmPJSk3TYOjcs6tReG6w_cLu1S7L_gPgT9z52iw=s2560" } app.openapi_schema = openapi_schema return app.openapi_schema app.openapi = custom_openapi # type: ignore[method-assign] app.include_router(completions_router) app.include_router(chat_router) app.include_router(chunks_router) app.include_router(ingest_router) app.include_router(embeddings_router) app.include_router(health_router) if settings.ui.enabled: from private_gpt.ui.ui import mount_in_app mount_in_app(app)
[ "llama_index.set_global_handler" ]
[((735, 775), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (765, 775), False, 'import llama_index\n'), ((2313, 2322), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (2320, 2322), False, 'from fastapi import FastAPI\n'), ((2447, 3013), 'fastapi.openapi.utils.get_openapi', 'get_openapi', ([], {'title': '"""PrivateGPT"""', 'description': 'description', 'version': '"""0.1.0"""', 'summary': '"""PrivateGPT is a production-ready AI project that allows you to ask questions to your documents using the power of Large Language Models (LLMs), even in scenarios without Internet connection. 100% private, no data leaves your execution environment at any point."""', 'contact': "{'url': 'https://github.com/imartinez/privateGPT'}", 'license_info': "{'name': 'Apache 2.0', 'url':\n 'https://www.apache.org/licenses/LICENSE-2.0.html'}", 'routes': 'app.routes', 'tags': 'tags_metadata'}), "(title='PrivateGPT', description=description, version='0.1.0',\n summary=\n 'PrivateGPT is a production-ready AI project that allows you to ask questions to your documents using the power of Large Language Models (LLMs), even in scenarios without Internet connection. 100% private, no data leaves your execution environment at any point.'\n , contact={'url': 'https://github.com/imartinez/privateGPT'},\n license_info={'name': 'Apache 2.0', 'url':\n 'https://www.apache.org/licenses/LICENSE-2.0.html'}, routes=app.routes,\n tags=tags_metadata)\n", (2458, 3013), False, 'from fastapi.openapi.utils import get_openapi\n'), ((3811, 3828), 'private_gpt.ui.ui.mount_in_app', 'mount_in_app', (['app'], {}), '(app)\n', (3823, 3828), False, 'from private_gpt.ui.ui import mount_in_app\n')]
import llama_index from .di import global_injector from .launcher import create_app llama_index.set_global_handler("simple") app = create_app(global_injector)
[ "llama_index.set_global_handler" ]
[((86, 126), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (116, 126), False, 'import llama_index\n')]
import logging from dataclasses import dataclass from typing import List, Optional import llama_index from llama_index.bridge.pydantic import BaseModel from llama_index.callbacks.base import CallbackManager from llama_index.embeddings.base import BaseEmbedding from llama_index.embeddings.utils import EmbedType, resolve_embed_model from llama_index.indices.prompt_helper import PromptHelper from llama_index.llm_predictor import LLMPredictor from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata from llama_index.llms.llm import LLM from llama_index.llms.utils import LLMType, resolve_llm from llama_index.logger import LlamaLogger from llama_index.node_parser.interface import NodeParser, TextSplitter from llama_index.node_parser.text.sentence import ( DEFAULT_CHUNK_SIZE, SENTENCE_CHUNK_OVERLAP, SentenceSplitter, ) from llama_index.prompts.base import BasePromptTemplate from llama_index.schema import TransformComponent from llama_index.types import PydanticProgramMode logger = logging.getLogger(__name__) def _get_default_node_parser( chunk_size: int = DEFAULT_CHUNK_SIZE, chunk_overlap: int = SENTENCE_CHUNK_OVERLAP, callback_manager: Optional[CallbackManager] = None, ) -> NodeParser: """Get default node parser.""" return SentenceSplitter( chunk_size=chunk_size, chunk_overlap=chunk_overlap, callback_manager=callback_manager or CallbackManager(), ) def _get_default_prompt_helper( llm_metadata: LLMMetadata, context_window: Optional[int] = None, num_output: Optional[int] = None, ) -> PromptHelper: """Get default prompt helper.""" if context_window is not None: llm_metadata.context_window = context_window if num_output is not None: llm_metadata.num_output = num_output return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata) class ServiceContextData(BaseModel): llm: dict llm_predictor: dict prompt_helper: dict embed_model: dict transformations: List[dict] @dataclass class ServiceContext: """Service Context container. The service context container is a utility container for LlamaIndex index and query classes. It contains the following: - llm_predictor: BaseLLMPredictor - prompt_helper: PromptHelper - embed_model: BaseEmbedding - node_parser: NodeParser - llama_logger: LlamaLogger (deprecated) - callback_manager: CallbackManager """ llm_predictor: BaseLLMPredictor prompt_helper: PromptHelper embed_model: BaseEmbedding transformations: List[TransformComponent] llama_logger: LlamaLogger callback_manager: CallbackManager @classmethod def from_defaults( cls, llm_predictor: Optional[BaseLLMPredictor] = None, llm: Optional[LLMType] = "default", prompt_helper: Optional[PromptHelper] = None, embed_model: Optional[EmbedType] = "default", node_parser: Optional[NodeParser] = None, text_splitter: Optional[TextSplitter] = None, transformations: Optional[List[TransformComponent]] = None, llama_logger: Optional[LlamaLogger] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, query_wrapper_prompt: Optional[BasePromptTemplate] = None, # pydantic program mode (used if output_cls is specified) pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, # node parser kwargs chunk_size: Optional[int] = None, chunk_overlap: Optional[int] = None, # prompt helper kwargs context_window: Optional[int] = None, num_output: Optional[int] = None, # deprecated kwargs chunk_size_limit: Optional[int] = None, ) -> "ServiceContext": """Create a ServiceContext from defaults. 
If an argument is specified, then use the argument value provided for that parameter. If an argument is not specified, then use the default value. You can change the base defaults by setting llama_index.global_service_context to a ServiceContext object with your desired settings. Args: llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor prompt_helper (Optional[PromptHelper]): PromptHelper embed_model (Optional[BaseEmbedding]): BaseEmbedding or "local" (use local model) node_parser (Optional[NodeParser]): NodeParser llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated) chunk_size (Optional[int]): chunk_size callback_manager (Optional[CallbackManager]): CallbackManager system_prompt (Optional[str]): System-wide prompt to be prepended to all input prompts, used to guide system "decision making" query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap passed-in input queries. Deprecated Args: chunk_size_limit (Optional[int]): renamed to chunk_size """ if chunk_size_limit is not None and chunk_size is None: logger.warning( "chunk_size_limit is deprecated, please specify chunk_size instead" ) chunk_size = chunk_size_limit if llama_index.global_service_context is not None: return cls.from_service_context( llama_index.global_service_context, llm=llm, llm_predictor=llm_predictor, prompt_helper=prompt_helper, embed_model=embed_model, node_parser=node_parser, text_splitter=text_splitter, llama_logger=llama_logger, callback_manager=callback_manager, context_window=context_window, chunk_size=chunk_size, chunk_size_limit=chunk_size_limit, chunk_overlap=chunk_overlap, num_output=num_output, system_prompt=system_prompt, query_wrapper_prompt=query_wrapper_prompt, transformations=transformations, ) callback_manager = callback_manager or CallbackManager([]) if llm != "default": if llm_predictor is not None: raise ValueError("Cannot specify both llm and llm_predictor") llm = resolve_llm(llm) llm.system_prompt = llm.system_prompt or system_prompt llm.query_wrapper_prompt = llm.query_wrapper_prompt or query_wrapper_prompt llm.pydantic_program_mode = ( llm.pydantic_program_mode or pydantic_program_mode ) if llm_predictor is not None: print("LLMPredictor is deprecated, please use LLM instead.") llm_predictor = llm_predictor or LLMPredictor( llm=llm, pydantic_program_mode=pydantic_program_mode ) if isinstance(llm_predictor, LLMPredictor): llm_predictor.llm.callback_manager = callback_manager if system_prompt: llm_predictor.system_prompt = system_prompt if query_wrapper_prompt: llm_predictor.query_wrapper_prompt = query_wrapper_prompt # NOTE: the embed_model isn't used in all indices # NOTE: embed model should be a transformation, but the way the service # context works, we can't put in there yet. 
embed_model = resolve_embed_model(embed_model) embed_model.callback_manager = callback_manager prompt_helper = prompt_helper or _get_default_prompt_helper( llm_metadata=llm_predictor.metadata, context_window=context_window, num_output=num_output, ) if text_splitter is not None and node_parser is not None: raise ValueError("Cannot specify both text_splitter and node_parser") node_parser = ( text_splitter # text splitter extends node parser or node_parser or _get_default_node_parser( chunk_size=chunk_size or DEFAULT_CHUNK_SIZE, chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP, callback_manager=callback_manager, ) ) transformations = transformations or [node_parser] llama_logger = llama_logger or LlamaLogger() return cls( llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper, transformations=transformations, llama_logger=llama_logger, # deprecated callback_manager=callback_manager, ) @classmethod def from_service_context( cls, service_context: "ServiceContext", llm_predictor: Optional[BaseLLMPredictor] = None, llm: Optional[LLMType] = "default", prompt_helper: Optional[PromptHelper] = None, embed_model: Optional[EmbedType] = "default", node_parser: Optional[NodeParser] = None, text_splitter: Optional[TextSplitter] = None, transformations: Optional[List[TransformComponent]] = None, llama_logger: Optional[LlamaLogger] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, query_wrapper_prompt: Optional[BasePromptTemplate] = None, # node parser kwargs chunk_size: Optional[int] = None, chunk_overlap: Optional[int] = None, # prompt helper kwargs context_window: Optional[int] = None, num_output: Optional[int] = None, # deprecated kwargs chunk_size_limit: Optional[int] = None, ) -> "ServiceContext": """Instantiate a new service context using a previous as the defaults.""" if chunk_size_limit is not None and chunk_size is None: logger.warning( "chunk_size_limit is deprecated, please specify chunk_size", DeprecationWarning, ) chunk_size = chunk_size_limit callback_manager = callback_manager or service_context.callback_manager if llm != "default": if llm_predictor is not None: raise ValueError("Cannot specify both llm and llm_predictor") llm = resolve_llm(llm) llm_predictor = LLMPredictor(llm=llm) llm_predictor = llm_predictor or service_context.llm_predictor if isinstance(llm_predictor, LLMPredictor): llm_predictor.llm.callback_manager = callback_manager if system_prompt: llm_predictor.system_prompt = system_prompt if query_wrapper_prompt: llm_predictor.query_wrapper_prompt = query_wrapper_prompt # NOTE: the embed_model isn't used in all indices # default to using the embed model passed from the service context if embed_model == "default": embed_model = service_context.embed_model embed_model = resolve_embed_model(embed_model) embed_model.callback_manager = callback_manager prompt_helper = prompt_helper or service_context.prompt_helper if context_window is not None or num_output is not None: prompt_helper = _get_default_prompt_helper( llm_metadata=llm_predictor.metadata, context_window=context_window, num_output=num_output, ) transformations = transformations or [] node_parser_found = False for transform in service_context.transformations: if isinstance(transform, NodeParser): node_parser_found = True node_parser = transform break if text_splitter is not None and node_parser is not None: raise ValueError("Cannot specify both text_splitter and node_parser") if not node_parser_found: node_parser = ( text_splitter # text splitter 
extends node parser or node_parser or _get_default_node_parser( chunk_size=chunk_size or DEFAULT_CHUNK_SIZE, chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP, callback_manager=callback_manager, ) ) transformations = transformations or service_context.transformations llama_logger = llama_logger or service_context.llama_logger return cls( llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper, transformations=transformations, llama_logger=llama_logger, # deprecated callback_manager=callback_manager, ) @property def llm(self) -> LLM: return self.llm_predictor.llm @property def node_parser(self) -> NodeParser: """Get the node parser.""" for transform in self.transformations: if isinstance(transform, NodeParser): return transform raise ValueError("No node parser found.") def to_dict(self) -> dict: """Convert service context to dict.""" llm_dict = self.llm_predictor.llm.to_dict() llm_predictor_dict = self.llm_predictor.to_dict() embed_model_dict = self.embed_model.to_dict() prompt_helper_dict = self.prompt_helper.to_dict() tranform_list_dict = [x.to_dict() for x in self.transformations] return ServiceContextData( llm=llm_dict, llm_predictor=llm_predictor_dict, prompt_helper=prompt_helper_dict, embed_model=embed_model_dict, transformations=tranform_list_dict, ).dict() @classmethod def from_dict(cls, data: dict) -> "ServiceContext": from llama_index.embeddings.loading import load_embed_model from llama_index.extractors.loading import load_extractor from llama_index.llm_predictor.loading import load_predictor from llama_index.node_parser.loading import load_parser service_context_data = ServiceContextData.parse_obj(data) llm_predictor = load_predictor(service_context_data.llm_predictor) embed_model = load_embed_model(service_context_data.embed_model) prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper) transformations: List[TransformComponent] = [] for transform in service_context_data.transformations: try: transformations.append(load_parser(transform)) except ValueError: transformations.append(load_extractor(transform)) return cls.from_defaults( llm_predictor=llm_predictor, prompt_helper=prompt_helper, embed_model=embed_model, transformations=transformations, ) def set_global_service_context(service_context: Optional[ServiceContext]) -> None: """Helper function to set the global service context.""" llama_index.global_service_context = service_context
[ "llama_index.llm_predictor.loading.load_predictor", "llama_index.embeddings.loading.load_embed_model", "llama_index.logger.LlamaLogger", "llama_index.llms.utils.resolve_llm", "llama_index.node_parser.loading.load_parser", "llama_index.callbacks.base.CallbackManager", "llama_index.indices.prompt_helper.PromptHelper.from_dict", "llama_index.extractors.loading.load_extractor", "llama_index.llm_predictor.LLMPredictor", "llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata", "llama_index.embeddings.utils.resolve_embed_model" ]
[((1018, 1045), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1035, 1045), False, 'import logging\n'), ((1820, 1877), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1850, 1877), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((7504, 7536), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (7523, 7536), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((11065, 11097), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (11084, 11097), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((14239, 14289), 'llama_index.llm_predictor.loading.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {}), '(service_context_data.llm_predictor)\n', (14253, 14289), False, 'from llama_index.llm_predictor.loading import load_predictor\n'), ((14313, 14363), 'llama_index.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (14329, 14363), False, 'from llama_index.embeddings.loading import load_embed_model\n'), ((14389, 14447), 'llama_index.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (14411, 14447), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((6248, 6267), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6263, 6267), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((6435, 6451), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (6446, 6451), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((6883, 6949), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm', 'pydantic_program_mode': 'pydantic_program_mode'}), '(llm=llm, pydantic_program_mode=pydantic_program_mode)\n', (6895, 6949), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((8412, 8425), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (8423, 8425), False, 'from llama_index.logger import LlamaLogger\n'), ((10360, 10376), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (10371, 10376), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((10405, 10426), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (10417, 10426), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((1419, 1436), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (1434, 1436), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((14623, 14645), 'llama_index.node_parser.loading.load_parser', 'load_parser', (['transform'], {}), '(transform)\n', (14634, 14645), False, 'from llama_index.node_parser.loading import load_parser\n'), ((14717, 14742), 'llama_index.extractors.loading.load_extractor', 'load_extractor', (['transform'], {}), '(transform)\n', (14731, 14742), False, 'from llama_index.extractors.loading import load_extractor\n')]
"""FastAPI app creation, logger configuration and main API routes.""" import llama_index from auth_RAG.di import global_injector from auth_RAG.launcher import create_app # Add LlamaIndex simple observability llama_index.set_global_handler("simple") app = create_app(global_injector)
[ "llama_index.set_global_handler" ]
[((211, 251), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (241, 251), False, 'import llama_index\n'), ((259, 286), 'auth_RAG.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (269, 286), False, 'from auth_RAG.launcher import create_app\n')]
import llama_index from pydantic import BaseModel from typing import List from typing import Optional class Section(BaseModel): section_id: str section_text: str vector_representation: Optional[List[float]] keywords: Optional[List[str]] named_entities: Optional[List[str]] summary: Optional[str] sentiment: Optional[float] document_id: str def to_llama_format(self): """Converts the CogniSphere's Section into a llama's Document format.""" extra_info = { "section_id": self.section_id or "", "document_id": self.document_id or "", "summary": self.summary or "", "sentiment": self.sentiment or "", "keywords": ", ".join(self.keywords) if self.keywords else "", "named_entities": ", ".join(self.named_entities) if self.named_entities else "", } return llama_index.Document( text=self.section_text or "", doc_id=f"{self.document_id}-{self.section_id}" if self.document_id and self.section_id else "", extra_info=extra_info, embedding=self.vector_representation or [], ) class Document(BaseModel): id: str document_id: str title: Optional[str] author: Optional[str] publication_date: Optional[str] genre: Optional[str] publisher: Optional[str] language: Optional[str] isbn: Optional[str] summary: Optional[str] vector_representation: Optional[List[float]] sections: List[Section] def get_section_keywords(self): section_keywords = [ keyword for section in self.sections if section.keywords for keyword in section.keywords ] # remove duplicates return list(set(section_keywords)) def get_section_named_entities(self): section_named_entities = [ entity for section in self.sections if section.named_entities for entity in section.named_entities ] # remove duplicates return list(set(section_named_entities)) def get_text(self): return " ".join(section.section_text for section in self.sections)
[ "llama_index.Document" ]
[((918, 1146), 'llama_index.Document', 'llama_index.Document', ([], {'text': "(self.section_text or '')", 'doc_id': "(f'{self.document_id}-{self.section_id}' if self.document_id and self.\n section_id else '')", 'extra_info': 'extra_info', 'embedding': '(self.vector_representation or [])'}), "(text=self.section_text or '', doc_id=\n f'{self.document_id}-{self.section_id}' if self.document_id and self.\n section_id else '', extra_info=extra_info, embedding=self.\n vector_representation or [])\n", (938, 1146), False, 'import llama_index\n')]
## create graph from pyvis.network import Network import llama_index.core from llama_index.core import StorageContext, load_index_from_storage storage_context = StorageContext.from_defaults(persist_dir="math_index_persist") index = load_index_from_storage(storage_context) # retriever = llama_index.core.indices.knowledge_graph.KGTableRetriever(index) g = index.get_networkx_graph() net = Network(notebook=True, cdn_resources="in_line", directed=True) net.from_nx(g) net.show("example.html")
[ "llama_index.core.StorageContext.from_defaults", "llama_index.core.load_index_from_storage" ]
[((162, 224), 'llama_index.core.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""math_index_persist"""'}), "(persist_dir='math_index_persist')\n", (190, 224), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((233, 273), 'llama_index.core.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (256, 273), False, 'from llama_index.core import StorageContext, load_index_from_storage\n'), ((391, 453), 'pyvis.network.Network', 'Network', ([], {'notebook': '(True)', 'cdn_resources': '"""in_line"""', 'directed': '(True)'}), "(notebook=True, cdn_resources='in_line', directed=True)\n", (398, 453), False, 'from pyvis.network import Network\n')]
from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Callable, List, Optional if TYPE_CHECKING: from llama_index import ServiceContext from llama_index.core.base.embeddings.base import BaseEmbedding from llama_index.core.callbacks.base import BaseCallbackHandler, CallbackManager from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model from llama_index.core.indices.prompt_helper import PromptHelper from llama_index.core.llms import LLM from llama_index.core.llms.utils import LLMType, resolve_llm from llama_index.core.node_parser import NodeParser, SentenceSplitter from llama_index.core.schema import TransformComponent from llama_index.core.types import PydanticProgramMode from llama_index.core.utils import get_tokenizer, set_global_tokenizer @dataclass class _Settings: """Settings for the Llama Index, lazily initialized.""" # lazy initialization _llm: Optional[LLM] = None _embed_model: Optional[BaseEmbedding] = None _callback_manager: Optional[CallbackManager] = None _tokenizer: Optional[Callable[[str], List[Any]]] = None _node_parser: Optional[NodeParser] = None _prompt_helper: Optional[PromptHelper] = None _transformations: Optional[List[TransformComponent]] = None # ---- LLM ---- @property def llm(self) -> LLM: """Get the LLM.""" if self._llm is None: self._llm = resolve_llm("default") if self._callback_manager is not None: self._llm.callback_manager = self._callback_manager return self._llm @llm.setter def llm(self, llm: LLMType) -> None: """Set the LLM.""" self._llm = resolve_llm(llm) @property def pydantic_program_mode(self) -> PydanticProgramMode: """Get the pydantic program mode.""" return self.llm.pydantic_program_mode @pydantic_program_mode.setter def pydantic_program_mode(self, pydantic_program_mode: PydanticProgramMode) -> None: """Set the pydantic program mode.""" self.llm.pydantic_program_mode = pydantic_program_mode # ---- Embedding ---- @property def embed_model(self) -> BaseEmbedding: """Get the embedding model.""" if self._embed_model is None: self._embed_model = resolve_embed_model("default") if self._callback_manager is not None: self._embed_model.callback_manager = self._callback_manager return self._embed_model @embed_model.setter def embed_model(self, embed_model: EmbedType) -> None: """Set the embedding model.""" self._embed_model = resolve_embed_model(embed_model) # ---- Callbacks ---- @property def global_handler(self) -> Optional[BaseCallbackHandler]: """Get the global handler.""" import llama_index.core # TODO: deprecated? return llama_index.global_handler @global_handler.setter def global_handler(self, eval_mode: str, **eval_params: Any) -> None: """Set the global handler.""" from llama_index import set_global_handler # TODO: deprecated? set_global_handler(eval_mode, **eval_params) @property def callback_manager(self) -> CallbackManager: """Get the callback manager.""" if self._callback_manager is None: self._callback_manager = CallbackManager() return self._callback_manager @callback_manager.setter def callback_manager(self, callback_manager: CallbackManager) -> None: """Set the callback manager.""" self._callback_manager = callback_manager # ---- Tokenizer ---- @property def tokenizer(self) -> Callable[[str], List[Any]]: """Get the tokenizer.""" import llama_index.core if llama_index.global_tokenizer is None: return get_tokenizer() # TODO: deprecated? 
return llama_index.global_tokenizer @tokenizer.setter def tokenizer(self, tokenizer: Callable[[str], List[Any]]) -> None: """Set the tokenizer.""" try: from transformers import PreTrainedTokenizerBase # pants: no-infer-dep if isinstance(tokenizer, PreTrainedTokenizerBase): from functools import partial tokenizer = partial(tokenizer.encode, add_special_tokens=False) except ImportError: pass # TODO: deprecated? set_global_tokenizer(tokenizer) # ---- Node parser ---- @property def node_parser(self) -> NodeParser: """Get the node parser.""" if self._node_parser is None: self._node_parser = SentenceSplitter() if self._callback_manager is not None: self._node_parser.callback_manager = self._callback_manager return self._node_parser @node_parser.setter def node_parser(self, node_parser: NodeParser) -> None: """Set the node parser.""" self._node_parser = node_parser @property def chunk_size(self) -> int: """Get the chunk size.""" if hasattr(self.node_parser, "chunk_size"): return self.node_parser.chunk_size else: raise ValueError("Configured node parser does not have chunk size.") @chunk_size.setter def chunk_size(self, chunk_size: int) -> None: """Set the chunk size.""" if hasattr(self.node_parser, "chunk_size"): self.node_parser.chunk_size = chunk_size else: raise ValueError("Configured node parser does not have chunk size.") @property def chunk_overlap(self) -> int: """Get the chunk overlap.""" if hasattr(self.node_parser, "chunk_overlap"): return self.node_parser.chunk_overlap else: raise ValueError("Configured node parser does not have chunk overlap.") @chunk_overlap.setter def chunk_overlap(self, chunk_overlap: int) -> None: """Set the chunk overlap.""" if hasattr(self.node_parser, "chunk_overlap"): self.node_parser.chunk_overlap = chunk_overlap else: raise ValueError("Configured node parser does not have chunk overlap.") # ---- Node parser alias ---- @property def text_splitter(self) -> NodeParser: """Get the text splitter.""" return self.node_parser @text_splitter.setter def text_splitter(self, text_splitter: NodeParser) -> None: """Set the text splitter.""" self.node_parser = text_splitter # ---- Prompt helper ---- @property def prompt_helper(self) -> PromptHelper: """Get the prompt helper.""" if self._llm is not None and self._prompt_helper is None: self._prompt_helper = PromptHelper.from_llm_metadata(self._llm.metadata) elif self._prompt_helper is None: self._prompt_helper = PromptHelper() return self._prompt_helper @prompt_helper.setter def prompt_helper(self, prompt_helper: PromptHelper) -> None: """Set the prompt helper.""" self._prompt_helper = prompt_helper @property def num_output(self) -> int: """Get the number of outputs.""" return self.prompt_helper.num_output @num_output.setter def num_output(self, num_output: int) -> None: """Set the number of outputs.""" self.prompt_helper.num_output = num_output @property def context_window(self) -> int: """Get the context window.""" return self.prompt_helper.context_window @context_window.setter def context_window(self, context_window: int) -> None: """Set the context window.""" self.prompt_helper.context_window = context_window # ---- Transformations ---- @property def transformations(self) -> List[TransformComponent]: """Get the transformations.""" if self._transformations is None: self._transformations = [self.node_parser] return self._transformations @transformations.setter def transformations(self, transformations: List[TransformComponent]) -> None: """Set the transformations.""" self._transformations = transformations # Singleton 
Settings = _Settings() # -- Helper functions for deprecation/migration -- def llm_from_settings_or_context( settings: _Settings, context: Optional["ServiceContext"] ) -> LLM: """Get settings from either settings or context.""" if context is not None: return context.llm return settings.llm def embed_model_from_settings_or_context( settings: _Settings, context: Optional["ServiceContext"] ) -> BaseEmbedding: """Get settings from either settings or context.""" if context is not None: return context.embed_model return settings.embed_model def callback_manager_from_settings_or_context( settings: _Settings, context: Optional["ServiceContext"] ) -> CallbackManager: """Get settings from either settings or context.""" if context is not None: return context.callback_manager return settings.callback_manager def node_parser_from_settings_or_context( settings: _Settings, context: Optional["ServiceContext"] ) -> NodeParser: """Get settings from either settings or context.""" if context is not None: return context.node_parser return settings.node_parser def prompt_helper_from_settings_or_context( settings: _Settings, context: Optional["ServiceContext"] ) -> PromptHelper: """Get settings from either settings or context.""" if context is not None: return context.prompt_helper return settings.prompt_helper def transformations_from_settings_or_context( settings: _Settings, context: Optional["ServiceContext"] ) -> List[TransformComponent]: """Get settings from either settings or context.""" if context is not None: return context.transformations return settings.transformations
[ "llama_index.core.embeddings.utils.resolve_embed_model", "llama_index.core.node_parser.SentenceSplitter", "llama_index.core.callbacks.base.CallbackManager", "llama_index.set_global_handler", "llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata", "llama_index.core.utils.get_tokenizer", "llama_index.core.llms.utils.resolve_llm", "llama_index.core.indices.prompt_helper.PromptHelper", "llama_index.core.utils.set_global_tokenizer" ]
[((1680, 1696), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (1691, 1696), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((2626, 2658), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (2645, 2658), False, 'from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model\n'), ((3133, 3177), 'llama_index.set_global_handler', 'set_global_handler', (['eval_mode'], {}), '(eval_mode, **eval_params)\n', (3151, 3177), False, 'from llama_index import set_global_handler\n'), ((4433, 4464), 'llama_index.core.utils.set_global_tokenizer', 'set_global_tokenizer', (['tokenizer'], {}), '(tokenizer)\n', (4453, 4464), False, 'from llama_index.core.utils import get_tokenizer, set_global_tokenizer\n'), ((1414, 1436), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', (['"""default"""'], {}), "('default')\n", (1425, 1436), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((2290, 2320), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['"""default"""'], {}), "('default')\n", (2309, 2320), False, 'from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model\n'), ((3364, 3381), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (3379, 3381), False, 'from llama_index.core.callbacks.base import BaseCallbackHandler, CallbackManager\n'), ((3846, 3861), 'llama_index.core.utils.get_tokenizer', 'get_tokenizer', ([], {}), '()\n', (3859, 3861), False, 'from llama_index.core.utils import get_tokenizer, set_global_tokenizer\n'), ((4655, 4673), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {}), '()\n', (4671, 4673), False, 'from llama_index.core.node_parser import NodeParser, SentenceSplitter\n'), ((6756, 6806), 'llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', (['self._llm.metadata'], {}), '(self._llm.metadata)\n', (6786, 6806), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n'), ((4299, 4350), 'functools.partial', 'partial', (['tokenizer.encode'], {'add_special_tokens': '(False)'}), '(tokenizer.encode, add_special_tokens=False)\n', (4306, 4350), False, 'from functools import partial\n'), ((6883, 6897), 'llama_index.core.indices.prompt_helper.PromptHelper', 'PromptHelper', ([], {}), '()\n', (6895, 6897), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n')]
"""Download.""" import json import logging import os import subprocess import sys from enum import Enum from importlib import util from pathlib import Path from typing import Any, Dict, List, Optional, Union import pkg_resources import requests from pkg_resources import DistributionNotFound from llama_index.download.utils import ( get_exports, get_file_content, initialize_directory, rewrite_exports, ) LLAMA_HUB_CONTENTS_URL = f"https://raw.githubusercontent.com/run-llama/llama-hub/main" LLAMA_HUB_PATH = "/llama_hub" LLAMA_HUB_URL = LLAMA_HUB_CONTENTS_URL + LLAMA_HUB_PATH PATH_TYPE = Union[str, Path] logger = logging.getLogger(__name__) LLAMAHUB_ANALYTICS_PROXY_SERVER = "https://llamahub.ai/api/analytics/downloads" class MODULE_TYPE(str, Enum): LOADER = "loader" TOOL = "tool" LLAMAPACK = "llamapack" DATASETS = "datasets" def get_module_info( local_dir_path: PATH_TYPE, remote_dir_path: PATH_TYPE, module_class: str, refresh_cache: bool = False, library_path: str = "library.json", disable_library_cache: bool = False, ) -> Dict: """Get module info.""" if isinstance(local_dir_path, str): local_dir_path = Path(local_dir_path) local_library_path = f"{local_dir_path}/{library_path}" module_id = None # e.g. `web/simple_web` extra_files = [] # e.g. `web/simple_web/utils.py` # Check cache first if not refresh_cache and os.path.exists(local_library_path): with open(local_library_path) as f: library = json.load(f) if module_class in library: module_id = library[module_class]["id"] extra_files = library[module_class].get("extra_files", []) # Fetch up-to-date library from remote repo if module_id not found if module_id is None: library_raw_content, _ = get_file_content( str(remote_dir_path), f"/{library_path}" ) library = json.loads(library_raw_content) if module_class not in library: raise ValueError("Loader class name not found in library") module_id = library[module_class]["id"] extra_files = library[module_class].get("extra_files", []) # create cache dir if needed local_library_dir = os.path.dirname(local_library_path) if not disable_library_cache: if not os.path.exists(local_library_dir): os.makedirs(local_library_dir) # Update cache with open(local_library_path, "w") as f: f.write(library_raw_content) if module_id is None: raise ValueError("Loader class name not found in library") return { "module_id": module_id, "extra_files": extra_files, } def download_module_and_reqs( local_dir_path: PATH_TYPE, remote_dir_path: PATH_TYPE, module_id: str, extra_files: List[str], refresh_cache: bool = False, use_gpt_index_import: bool = False, base_file_name: str = "base.py", override_path: bool = False, ) -> None: """Load module.""" if isinstance(local_dir_path, str): local_dir_path = Path(local_dir_path) if override_path: module_path = str(local_dir_path) else: module_path = f"{local_dir_path}/{module_id}" if refresh_cache or not os.path.exists(module_path): os.makedirs(module_path, exist_ok=True) basepy_raw_content, _ = get_file_content( str(remote_dir_path), f"/{module_id}/{base_file_name}" ) if use_gpt_index_import: basepy_raw_content = basepy_raw_content.replace( "import llama_index", "import llama_index" ) basepy_raw_content = basepy_raw_content.replace( "from llama_index", "from llama_index" ) with open(f"{module_path}/{base_file_name}", "w") as f: f.write(basepy_raw_content) # Get content of extra files if there are any # and write them under the loader directory for extra_file in extra_files: extra_file_raw_content, _ = get_file_content( str(remote_dir_path), f"/{module_id}/{extra_file}" ) # If the extra 
file is an __init__.py file, we need to # add the exports to the __init__.py file in the modules directory if extra_file == "__init__.py": loader_exports = get_exports(extra_file_raw_content) existing_exports = [] with open(local_dir_path / "__init__.py", "r+") as f: f.write(f"from .{module_id} import {', '.join(loader_exports)}") existing_exports = get_exports(f.read()) rewrite_exports(existing_exports + loader_exports, str(local_dir_path)) with open(f"{module_path}/{extra_file}", "w") as f: f.write(extra_file_raw_content) # install requirements requirements_path = f"{local_dir_path}/requirements.txt" if not os.path.exists(requirements_path): # NOTE: need to check the status code response_txt, status_code = get_file_content( str(remote_dir_path), f"/{module_id}/requirements.txt" ) if status_code == 200: with open(requirements_path, "w") as f: f.write(response_txt) # Install dependencies if there are any and not already installed if os.path.exists(requirements_path): try: requirements = pkg_resources.parse_requirements( Path(requirements_path).open() ) pkg_resources.require([str(r) for r in requirements]) except DistributionNotFound: subprocess.check_call( [sys.executable, "-m", "pip", "install", "-r", requirements_path] ) def download_llama_module( module_class: str, llama_hub_url: str = LLAMA_HUB_URL, refresh_cache: bool = False, custom_dir: Optional[str] = None, custom_path: Optional[str] = None, library_path: str = "library.json", base_file_name: str = "base.py", use_gpt_index_import: bool = False, disable_library_cache: bool = False, override_path: bool = False, ) -> Any: """Download a module from LlamaHub. Can be a loader, tool, pack, or more. Args: loader_class: The name of the llama module class you want to download, such as `GmailOpenAIAgentPack`. refresh_cache: If true, the local cache will be skipped and the loader will be fetched directly from the remote repo. custom_dir: Custom dir name to download loader into (under parent folder). custom_path: Custom dirpath to download loader into. library_path: File name of the library file. use_gpt_index_import: If true, the loader files will use llama_index as the base dependency. By default (False), the loader files use llama_index as the base dependency. NOTE: this is a temporary workaround while we fully migrate all usages to llama_index. 
is_dataset: whether or not downloading a LlamaDataset Returns: A Loader, A Pack, An Agent, or A Dataset """ # create directory / get path dirpath = initialize_directory(custom_path=custom_path, custom_dir=custom_dir) # fetch info from library.json file module_info = get_module_info( local_dir_path=dirpath, remote_dir_path=llama_hub_url, module_class=module_class, refresh_cache=refresh_cache, library_path=library_path, disable_library_cache=disable_library_cache, ) module_id = module_info["module_id"] extra_files = module_info["extra_files"] # download the module, install requirements download_module_and_reqs( local_dir_path=dirpath, remote_dir_path=llama_hub_url, module_id=module_id, extra_files=extra_files, refresh_cache=refresh_cache, use_gpt_index_import=use_gpt_index_import, base_file_name=base_file_name, override_path=override_path, ) # loads the module into memory if override_path: spec = util.spec_from_file_location( "custom_module", location=f"{dirpath}/{base_file_name}" ) if spec is None: raise ValueError(f"Could not find file: {dirpath}/{base_file_name}.") else: spec = util.spec_from_file_location( "custom_module", location=f"{dirpath}/{module_id}/{base_file_name}" ) if spec is None: raise ValueError( f"Could not find file: {dirpath}/{module_id}/{base_file_name}." ) module = util.module_from_spec(spec) spec.loader.exec_module(module) # type: ignore return getattr(module, module_class) def track_download(module_class: str, module_type: str) -> None: """Tracks number of downloads via Llamahub proxy. Args: module_class: The name of the llama module being downloaded, e.g.,`GmailOpenAIAgentPack`. module_type: Can be "loader", "tool", "llamapack", or "datasets" """ try: requests.post( LLAMAHUB_ANALYTICS_PROXY_SERVER, json={"type": module_type, "plugin": module_class}, ) except Exception as e: logger.info(f"Error tracking downloads for {module_class} : {e}")
[ "llama_index.download.utils.get_exports", "llama_index.download.utils.initialize_directory" ]
[((637, 664), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (654, 664), False, 'import logging\n'), ((5360, 5393), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5374, 5393), False, 'import os\n'), ((7213, 7281), 'llama_index.download.utils.initialize_directory', 'initialize_directory', ([], {'custom_path': 'custom_path', 'custom_dir': 'custom_dir'}), '(custom_path=custom_path, custom_dir=custom_dir)\n', (7233, 7281), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((8659, 8686), 'importlib.util.module_from_spec', 'util.module_from_spec', (['spec'], {}), '(spec)\n', (8680, 8686), False, 'from importlib import util\n'), ((1197, 1217), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (1201, 1217), False, 'from pathlib import Path\n'), ((1434, 1468), 'os.path.exists', 'os.path.exists', (['local_library_path'], {}), '(local_library_path)\n', (1448, 1468), False, 'import os\n'), ((1938, 1969), 'json.loads', 'json.loads', (['library_raw_content'], {}), '(library_raw_content)\n', (1948, 1969), False, 'import json\n'), ((2263, 2298), 'os.path.dirname', 'os.path.dirname', (['local_library_path'], {}), '(local_library_path)\n', (2278, 2298), False, 'import os\n'), ((3131, 3151), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (3135, 3151), False, 'from pathlib import Path\n'), ((3347, 3386), 'os.makedirs', 'os.makedirs', (['module_path'], {'exist_ok': '(True)'}), '(module_path, exist_ok=True)\n', (3358, 3386), False, 'import os\n'), ((4949, 4982), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (4963, 4982), False, 'import os\n'), ((8136, 8226), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'f"""{dirpath}/{base_file_name}"""'}), "('custom_module', location=\n f'{dirpath}/{base_file_name}')\n", (8164, 8226), False, 'from importlib import util\n'), ((8376, 8478), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'f"""{dirpath}/{module_id}/{base_file_name}"""'}), "('custom_module', location=\n f'{dirpath}/{module_id}/{base_file_name}')\n", (8404, 8478), False, 'from importlib import util\n'), ((9109, 9211), 'requests.post', 'requests.post', (['LLAMAHUB_ANALYTICS_PROXY_SERVER'], {'json': "{'type': module_type, 'plugin': module_class}"}), "(LLAMAHUB_ANALYTICS_PROXY_SERVER, json={'type': module_type,\n 'plugin': module_class})\n", (9122, 9211), False, 'import requests\n'), ((1536, 1548), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1545, 1548), False, 'import json\n'), ((3310, 3337), 'os.path.exists', 'os.path.exists', (['module_path'], {}), '(module_path)\n', (3324, 3337), False, 'import os\n'), ((4385, 4420), 'llama_index.download.utils.get_exports', 'get_exports', (['extra_file_raw_content'], {}), '(extra_file_raw_content)\n', (4396, 4420), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((2356, 2389), 'os.path.exists', 'os.path.exists', (['local_library_dir'], {}), '(local_library_dir)\n', (2370, 2389), False, 'import os\n'), ((2407, 2437), 'os.makedirs', 'os.makedirs', (['local_library_dir'], {}), '(local_library_dir)\n', (2418, 2437), False, 'import os\n'), ((5645, 5737), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 
'install', '-r', requirements_path]"], {}), "([sys.executable, '-m', 'pip', 'install', '-r',\n requirements_path])\n", (5666, 5737), False, 'import subprocess\n'), ((5485, 5508), 'pathlib.Path', 'Path', (['requirements_path'], {}), '(requirements_path)\n', (5489, 5508), False, 'from pathlib import Path\n')]
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Callable, Coroutine from langchain.llms.base import BaseLLM from nemoguardrails import LLMRails, RailsConfig COLANG_CONFIG = """ define user express greeting "hi" define user express ill intent "I hate you" "I want to destroy the world" define bot express cannot respond "I'm sorry I cannot help you with that." define user express question "What is the current unemployment rate?" # Basic guardrail example define flow user express ill intent bot express cannot respond # Question answering flow define flow user ... $answer = execute llama_index_query(query=$last_user_message) bot $answer """ YAML_CONFIG = """ models: - type: main engine: openai model: text-davinci-003 """ def demo(): try: import llama_index from llama_index.indices.query.base import BaseQueryEngine from llama_index.response.schema import StreamingResponse except ImportError: raise ImportError( "Could not import llama_index, please install it with " "`pip install llama_index`." ) config = RailsConfig.from_content(COLANG_CONFIG, YAML_CONFIG) app = LLMRails(config) def _get_llama_index_query_engine(llm: BaseLLM): docs = llama_index.SimpleDirectoryReader( input_files=["../examples/grounding_rail/kb/report.md"] ).load_data() llm_predictor = llama_index.LLMPredictor(llm=llm) index = llama_index.GPTVectorStoreIndex.from_documents( docs, llm_predictor=llm_predictor ) default_query_engine = index.as_query_engine() return default_query_engine def _get_callable_query_engine( query_engine: BaseQueryEngine, ) -> Callable[[str], Coroutine[Any, Any, str]]: async def get_query_response(query: str) -> str: response = query_engine.query(query) if isinstance(response, StreamingResponse): typed_response = response.get_response() else: typed_response = response response_str = typed_response.response if response_str is None: return "" return response_str return get_query_response query_engine = _get_llama_index_query_engine(app.llm) app.register_action( _get_callable_query_engine(query_engine), name="llama_index_query" ) history = [{"role": "user", "content": "What is the current unemployment rate?"}] result = app.generate(messages=history) print(result) if __name__ == "__main__": demo()
[ "llama_index.GPTVectorStoreIndex.from_documents", "llama_index.LLMPredictor", "llama_index.SimpleDirectoryReader" ]
[((1791, 1843), 'nemoguardrails.RailsConfig.from_content', 'RailsConfig.from_content', (['COLANG_CONFIG', 'YAML_CONFIG'], {}), '(COLANG_CONFIG, YAML_CONFIG)\n', (1815, 1843), False, 'from nemoguardrails import LLMRails, RailsConfig\n'), ((1854, 1870), 'nemoguardrails.LLMRails', 'LLMRails', (['config'], {}), '(config)\n', (1862, 1870), False, 'from nemoguardrails import LLMRails, RailsConfig\n'), ((2089, 2122), 'llama_index.LLMPredictor', 'llama_index.LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (2113, 2122), False, 'import llama_index\n'), ((2139, 2225), 'llama_index.GPTVectorStoreIndex.from_documents', 'llama_index.GPTVectorStoreIndex.from_documents', (['docs'], {'llm_predictor': 'llm_predictor'}), '(docs, llm_predictor=\n llm_predictor)\n', (2185, 2225), False, 'import llama_index\n'), ((1940, 2035), 'llama_index.SimpleDirectoryReader', 'llama_index.SimpleDirectoryReader', ([], {'input_files': "['../examples/grounding_rail/kb/report.md']"}), "(input_files=[\n '../examples/grounding_rail/kb/report.md'])\n", (1973, 2035), False, 'import llama_index\n')]
import logging from dataclasses import dataclass from typing import Optional, Union import llama_index from llama_index.callbacks.base import CallbackManager from llama_index.embeddings.base import BaseEmbedding from llama_index.embeddings.openai import OpenAIEmbedding from llama_index.indices.prompt_helper import PromptHelper from llama_index.llm_predictor import LLMPredictor from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata from llama_index.llms.base import LLM from llama_index.llms.utils import LLMType from llama_index.logger import LlamaLogger from llama_index.node_parser.interface import NodeParser from llama_index.node_parser.simple import SimpleNodeParser from llama_index.embeddings import ( DEFAULT_HUGGINGFACE_EMBEDDING_MODEL, LangchainEmbedding, ) logger = logging.getLogger(__name__) def _get_default_node_parser( chunk_size: Optional[int] = None, chunk_overlap: Optional[int] = None, callback_manager: Optional[CallbackManager] = None, ) -> NodeParser: """Get default node parser.""" return SimpleNodeParser.from_defaults( chunk_size=chunk_size, chunk_overlap=chunk_overlap, callback_manager=callback_manager, ) def _get_default_prompt_helper( llm_metadata: LLMMetadata, context_window: Optional[int] = None, num_output: Optional[int] = None, ) -> PromptHelper: """Get default prompt helper.""" if context_window is not None: llm_metadata.context_window = context_window if num_output is not None: llm_metadata.num_output = num_output return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata) @dataclass class ServiceContext: """Service Context container. The service context container is a utility container for LlamaIndex index and query classes. It contains the following: - llm_predictor: BaseLLMPredictor - prompt_helper: PromptHelper - embed_model: BaseEmbedding - node_parser: NodeParser - llama_logger: LlamaLogger (deprecated) - callback_manager: CallbackManager """ llm_predictor: BaseLLMPredictor prompt_helper: PromptHelper embed_model: BaseEmbedding node_parser: NodeParser llama_logger: LlamaLogger callback_manager: CallbackManager @classmethod def from_defaults( cls, llm_predictor: Optional[BaseLLMPredictor] = None, llm: Optional[LLMType] = None, prompt_helper: Optional[PromptHelper] = None, embed_model: Optional[Union[BaseEmbedding, str]] = None, node_parser: Optional[NodeParser] = None, llama_logger: Optional[LlamaLogger] = None, callback_manager: Optional[CallbackManager] = None, # node parser kwargs chunk_size: Optional[int] = None, chunk_overlap: Optional[int] = None, # prompt helper kwargs context_window: Optional[int] = None, num_output: Optional[int] = None, # deprecated kwargs chunk_size_limit: Optional[int] = None, ) -> "ServiceContext": """Create a ServiceContext from defaults. If an argument is specified, then use the argument value provided for that parameter. If an argument is not specified, then use the default value. You can change the base defaults by setting llama_index.global_service_context to a ServiceContext object with your desired settings. 
Args: llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor prompt_helper (Optional[PromptHelper]): PromptHelper embed_model (Optional[BaseEmbedding]): BaseEmbedding or "local" (use local model) node_parser (Optional[NodeParser]): NodeParser llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated) chunk_size (Optional[int]): chunk_size callback_manager (Optional[CallbackManager]): CallbackManager Deprecated Args: chunk_size_limit (Optional[int]): renamed to chunk_size """ if chunk_size_limit is not None and chunk_size is None: logger.warning( "chunk_size_limit is deprecated, please specify chunk_size instead" ) chunk_size = chunk_size_limit if isinstance(embed_model, str): splits = embed_model.split(":", 1) is_local = splits[0] model_name = splits[1] if len(splits) > 1 else None if is_local != "local": raise ValueError( "embed_model must start with str 'local' or of type BaseEmbedding" ) try: from langchain.embeddings import HuggingFaceEmbeddings except ImportError as exc: raise ImportError( "Could not import sentence_transformers or langchain package. " "Please install with `pip install sentence-transformers langchain`." ) from exc embed_model = LangchainEmbedding( HuggingFaceEmbeddings( model_name=model_name or DEFAULT_HUGGINGFACE_EMBEDDING_MODEL ) ) if llama_index.global_service_context is not None: return cls.from_service_context( llama_index.global_service_context, llm_predictor=llm_predictor, prompt_helper=prompt_helper, embed_model=embed_model, node_parser=node_parser, llama_logger=llama_logger, callback_manager=callback_manager, chunk_size=chunk_size, chunk_size_limit=chunk_size_limit, ) callback_manager = callback_manager or CallbackManager([]) if llm is not None: if llm_predictor is not None: raise ValueError("Cannot specify both llm and llm_predictor") llm_predictor = LLMPredictor(llm=llm) llm_predictor = llm_predictor or LLMPredictor() llm_predictor.callback_manager = callback_manager # NOTE: the embed_model isn't used in all indices embed_model = embed_model or OpenAIEmbedding() embed_model.callback_manager = callback_manager prompt_helper = prompt_helper or _get_default_prompt_helper( llm_metadata=llm_predictor.metadata, context_window=context_window, num_output=num_output, ) node_parser = node_parser or _get_default_node_parser( chunk_size=chunk_size, chunk_overlap=chunk_overlap, callback_manager=callback_manager, ) llama_logger = llama_logger or LlamaLogger() return cls( llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper, node_parser=node_parser, llama_logger=llama_logger, # deprecated callback_manager=callback_manager, ) @classmethod def from_service_context( cls, service_context: "ServiceContext", llm_predictor: Optional[BaseLLMPredictor] = None, llm: Optional[LLM] = None, prompt_helper: Optional[PromptHelper] = None, embed_model: Optional[BaseEmbedding] = None, node_parser: Optional[NodeParser] = None, llama_logger: Optional[LlamaLogger] = None, callback_manager: Optional[CallbackManager] = None, # node parser kwargs chunk_size: Optional[int] = None, chunk_overlap: Optional[int] = None, # prompt helper kwargs context_window: Optional[int] = None, num_output: Optional[int] = None, # deprecated kwargs chunk_size_limit: Optional[int] = None, ) -> "ServiceContext": """Instantiate a new service context using a previous as the defaults.""" if chunk_size_limit is not None and chunk_size is None: logger.warning( "chunk_size_limit is deprecated, please specify chunk_size", DeprecationWarning, ) chunk_size = chunk_size_limit callback_manager 
= callback_manager or service_context.callback_manager if llm is not None: if llm_predictor is not None: raise ValueError("Cannot specify both llm and llm_predictor") llm_predictor = LLMPredictor(llm=llm) llm_predictor = llm_predictor or service_context.llm_predictor llm_predictor.callback_manager = callback_manager # NOTE: the embed_model isn't used in all indices embed_model = embed_model or service_context.embed_model embed_model.callback_manager = callback_manager prompt_helper = prompt_helper or _get_default_prompt_helper( llm_metadata=llm_predictor.metadata, context_window=context_window, num_output=num_output, ) node_parser = node_parser or service_context.node_parser if chunk_size is not None or chunk_overlap is not None: node_parser = _get_default_node_parser( chunk_size=chunk_size, chunk_overlap=chunk_overlap, callback_manager=callback_manager, ) llama_logger = llama_logger or service_context.llama_logger return cls( llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper, node_parser=node_parser, llama_logger=llama_logger, # deprecated callback_manager=callback_manager, ) @property def llm(self) -> LLM: if not isinstance(self.llm_predictor, LLMPredictor): raise ValueError("llm_predictor must be an instance of LLMPredictor") return self.llm_predictor.llm def set_global_service_context(service_context: Optional[ServiceContext]) -> None: """Helper function to set the global service context.""" llama_index.global_service_context = service_context
[ "llama_index.logger.LlamaLogger", "llama_index.callbacks.base.CallbackManager", "llama_index.node_parser.simple.SimpleNodeParser.from_defaults", "llama_index.llm_predictor.LLMPredictor", "llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata", "llama_index.embeddings.openai.OpenAIEmbedding" ]
[((809, 836), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (826, 836), False, 'import logging\n'), ((1067, 1189), 'llama_index.node_parser.simple.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'callback_manager': 'callback_manager'}), '(chunk_size=chunk_size, chunk_overlap=\n chunk_overlap, callback_manager=callback_manager)\n', (1097, 1189), False, 'from llama_index.node_parser.simple import SimpleNodeParser\n'), ((1592, 1649), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1622, 1649), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((5761, 5780), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (5776, 5780), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((5957, 5978), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (5969, 5978), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((6020, 6034), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {}), '()\n', (6032, 6034), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((6189, 6206), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (6204, 6206), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((6707, 6720), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (6718, 6720), False, 'from llama_index.logger import LlamaLogger\n'), ((8412, 8433), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (8424, 8433), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((5050, 5137), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '(model_name or DEFAULT_HUGGINGFACE_EMBEDDING_MODEL)'}), '(model_name=model_name or\n DEFAULT_HUGGINGFACE_EMBEDDING_MODEL)\n', (5071, 5137), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n')]
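A usage sketch of the factory methods defined in the record above. The root-level import paths and the availability of MockLLM are assumptions about the release this module ships with; embed_model="local" additionally needs the sentence-transformers and langchain extras mentioned in the code.

# Usage sketch; import paths are assumptions about the package layout of this release.
from llama_index import ServiceContext, set_global_service_context
from llama_index.llms import MockLLM

# Build a context with an explicit offline LLM, a local HuggingFace embedding model
# and a custom chunk size; anything not passed falls back to the defaults above.
service_context = ServiceContext.from_defaults(
    llm=MockLLM(),
    embed_model="local",
    chunk_size=512,
)

# Install it globally so indices created later pick it up implicitly.
set_global_service_context(service_context)

# Derive a variant that overrides only the chunking while inheriting everything else.
smaller_chunks = ServiceContext.from_service_context(service_context, chunk_size=256)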
"""Google GenerativeAI Attributed Question and Answering (AQA) service. The GenAI Semantic AQA API is a managed end to end service that allows developers to create responses grounded on specified passages based on a user query. For more information visit: https://developers.generativeai.google/guide """ import logging from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast from llama_index.bridge.pydantic import BaseModel # type: ignore from llama_index.callbacks.schema import CBEventType, EventPayload from llama_index.indices.query.schema import QueryBundle from llama_index.prompts.mixin import PromptDictType from llama_index.response.schema import Response from llama_index.response_synthesizers.base import BaseSynthesizer, QueryTextType from llama_index.schema import MetadataMode, NodeWithScore, TextNode from llama_index.types import RESPONSE_TEXT_TYPE from llama_index.vector_stores.google.generativeai import google_service_context if TYPE_CHECKING: import google.ai.generativelanguage as genai _logger = logging.getLogger(__name__) _import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`" _separator = "\n\n" class SynthesizedResponse(BaseModel): """Response of `GoogleTextSynthesizer.get_response`.""" answer: str """The grounded response to the user's question.""" attributed_passages: List[str] """The list of passages the AQA model used for its response.""" answerable_probability: float """The model's estimate of the probability that its answer is correct and grounded in the input passages.""" class GoogleTextSynthesizer(BaseSynthesizer): """Google's Attributed Question and Answering service. Given a user's query and a list of passages, Google's server will return a response that is grounded to the provided list of passages. It will not base the response on parametric memory. """ _client: Any _temperature: float _answer_style: Any _safety_setting: List[Any] def __init__( self, *, temperature: float, answer_style: Any, safety_setting: List[Any], **kwargs: Any, ): """Create a new Google AQA. Prefer to use the factory `from_defaults` instead for type safety. See `from_defaults` for more documentation. """ try: import llama_index.vector_stores.google.generativeai.genai_extension as genaix except ImportError: raise ImportError(_import_err_msg) super().__init__( service_context=google_service_context, output_cls=SynthesizedResponse, **kwargs, ) self._client = genaix.build_generative_service() self._temperature = temperature self._answer_style = answer_style self._safety_setting = safety_setting # Type safe factory that is only available if Google is installed. @classmethod def from_defaults( cls, temperature: float = 0.7, answer_style: int = 1, safety_setting: List["genai.SafetySetting"] = [], ) -> "GoogleTextSynthesizer": """Create a new Google AQA. Example: responder = GoogleTextSynthesizer.create( temperature=0.7, answer_style=AnswerStyle.ABSTRACTIVE, safety_setting=[ SafetySetting( category=HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, ), ] ) Args: temperature: 0.0 to 1.0. answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle` The default is ABSTRACTIVE (1). safety_setting: See `google.ai.generativelanguage.SafetySetting`. Returns: an instance of GoogleTextSynthesizer. 
""" return cls( temperature=temperature, answer_style=answer_style, safety_setting=safety_setting, ) def get_response( self, query_str: str, text_chunks: Sequence[str], **response_kwargs: Any, ) -> SynthesizedResponse: """Generate a grounded response on provided passages. Args: query_str: The user's question. text_chunks: A list of passages that should be used to answer the question. Returns: A `SynthesizedResponse` object. """ try: import google.ai.generativelanguage as genai import llama_index.vector_stores.google.generativeai.genai_extension as genaix except ImportError: raise ImportError(_import_err_msg) client = cast(genai.GenerativeServiceClient, self._client) response = genaix.generate_answer( prompt=query_str, passages=list(text_chunks), answer_style=self._answer_style, safety_settings=self._safety_setting, temperature=self._temperature, client=client, ) return SynthesizedResponse( answer=response.answer, attributed_passages=[ passage.text for passage in response.attributed_passages ], answerable_probability=response.answerable_probability, ) async def aget_response( self, query_str: str, text_chunks: Sequence[str], **response_kwargs: Any, ) -> RESPONSE_TEXT_TYPE: # TODO: Implement a true async version. return self.get_response(query_str, text_chunks, **response_kwargs) def synthesize( self, query: QueryTextType, nodes: List[NodeWithScore], additional_source_nodes: Optional[Sequence[NodeWithScore]] = None, **response_kwargs: Any, ) -> Response: """Returns a grounded response based on provided passages. Returns: Response's `source_nodes` will begin with a list of attributed passages. These passages are the ones that were used to construct the grounded response. These passages will always have no score, the only way to mark them as attributed passages. Then, the list will follow with the originally provided passages, which will have a score from the retrieval. Response's `metadata` may also have have an entry with key `answerable_probability`, which is the model's estimate of the probability that its answer is correct and grounded in the input passages. """ if len(nodes) == 0: return Response("Empty Response") if isinstance(query, str): query = QueryBundle(query_str=query) with self._callback_manager.event( CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str} ) as event: internal_response = self.get_response( query_str=query.query_str, text_chunks=[ n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes ], **response_kwargs, ) additional_source_nodes = list(additional_source_nodes or []) external_response = self._prepare_external_response( internal_response, nodes + additional_source_nodes ) event.on_end(payload={EventPayload.RESPONSE: external_response}) return external_response async def asynthesize( self, query: QueryTextType, nodes: List[NodeWithScore], additional_source_nodes: Optional[Sequence[NodeWithScore]] = None, **response_kwargs: Any, ) -> Response: # TODO: Implement a true async version. return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs) def _prepare_external_response( self, response: SynthesizedResponse, source_nodes: List[NodeWithScore], ) -> Response: return Response( response=response.answer, source_nodes=[ NodeWithScore(node=TextNode(text=passage)) for passage in response.attributed_passages ] + source_nodes, metadata={ "answerable_probability": response.answerable_probability, }, ) def _get_prompts(self) -> PromptDictType: # Not used. 
return {} def _update_prompts(self, prompts_dict: PromptDictType) -> None: # Not used. ...
[ "llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service", "llama_index.schema.TextNode", "llama_index.response.schema.Response", "llama_index.indices.query.schema.QueryBundle" ]
[((1046, 1073), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1063, 1073), False, 'import logging\n'), ((2734, 2767), 'llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2765, 2767), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4819, 4868), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4823, 4868), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6762, 6788), 'llama_index.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6770, 6788), False, 'from llama_index.response.schema import Response\n'), ((6845, 6873), 'llama_index.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6856, 6873), False, 'from llama_index.indices.query.schema import QueryBundle\n'), ((8284, 8306), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8292, 8306), False, 'from llama_index.schema import MetadataMode, NodeWithScore, TextNode\n')]
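A sketch of driving the synthesizer defined above on its own. The import path is an assumption inferred from the module location, and running it requires the google-generativeai package plus AQA API credentials.

# Sketch only; the import path is an assumption and AQA access must be configured.
from llama_index.response_synthesizers.google.generativeai import GoogleTextSynthesizer

synthesizer = GoogleTextSynthesizer.from_defaults(
    temperature=0.7,
    answer_style=1,  # ABSTRACTIVE, per the factory docstring above
)

# The answer is grounded only in the passages supplied here, never parametric memory.
result = synthesizer.get_response(
    query_str="When was the report published?",
    text_chunks=[
        "The infrastructure report was written by the platform team.",
        "It was published in March 2023.",
    ],
)
print(result.answer)
print(result.attributed_passages)
print(result.answerable_probability)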
get_ipython().run_line_magic('pip', 'install llama-index-llms-openai')
get_ipython().run_line_magic('pip', 'install llama-index-packs-rag-fusion-query-pipeline')

# Download the sample essay to pg_essay.txt.
get_ipython().system("wget 'https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt' -O 'pg_essay.txt'")

from llama_index.core import SimpleDirectoryReader

reader = SimpleDirectoryReader(input_files=["pg_essay.txt"])
docs = reader.load_data()

# Launch the Arize Phoenix UI and route LlamaIndex traces to it.
import phoenix as px

px.launch_app()

import llama_index.core

llama_index.core.set_global_handler("arize_phoenix")

from llama_index.core.llama_pack import download_llama_pack
from llama_index.packs.rag_fusion_query_pipeline import RAGFusionPipelinePack
from llama_index.llms.openai import OpenAI

pack = RAGFusionPipelinePack(docs, llm=OpenAI(model="gpt-3.5-turbo"))

# Visualize the underlying query-pipeline DAG with pyvis.
from pyvis.network import Network

net = Network(notebook=True, cdn_resources="in_line", directed=True)
net.from_nx(pack.query_pipeline.dag)
net.show("rag_dag.html")

response = pack.run(query="What did the author do growing up?")
print(str(response))
[ "llama_index.core.SimpleDirectoryReader", "llama_index.llms.openai.OpenAI" ]
[((432, 483), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': "['pg_essay.txt']"}), "(input_files=['pg_essay.txt'])\n", (453, 483), False, 'from llama_index.core import SimpleDirectoryReader\n'), ((535, 550), 'phoenix.launch_app', 'px.launch_app', ([], {}), '()\n', (548, 550), True, 'import phoenix as px\n'), ((933, 995), 'pyvis.network.Network', 'Network', ([], {'notebook': '(True)', 'cdn_resources': '"""in_line"""', 'directed': '(True)'}), "(notebook=True, cdn_resources='in_line', directed=True)\n", (940, 995), False, 'from pyvis.network import Network\n'), ((858, 887), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (864, 887), False, 'from llama_index.llms.openai import OpenAI\n')]
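The notebook above imports download_llama_pack without using it; the sketch below shows that alternative route for obtaining the same pack at runtime. The registered pack name string, the download directory and the reuse of the previously downloaded pg_essay.txt are assumptions.

# Alternative to pip-installing the pack: fetch its source at runtime.
# The "RAGFusionPipelinePack" name string and download directory are assumptions.
from llama_index.core import SimpleDirectoryReader
from llama_index.core.llama_pack import download_llama_pack
from llama_index.llms.openai import OpenAI

docs = SimpleDirectoryReader(input_files=["pg_essay.txt"]).load_data()

RAGFusionPipelinePack = download_llama_pack("RAGFusionPipelinePack", "./rag_fusion_pack")
pack = RAGFusionPipelinePack(docs, llm=OpenAI(model="gpt-3.5-turbo"))
response = pack.run(query="What did the author work on before college?")
print(str(response))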
import uvicorn import os import logging import llama_index from typing import cast from pathlib import Path from fastapi.middleware.cors import CORSMiddleware from fastapi import FastAPI from dotenv import load_dotenv from contextlib import asynccontextmanager from firebase_admin import credentials, initialize_app from app.db.pg_vector import CustomPGVectorStore, get_vector_store_singleton from app.db.wait_for_db import check_database_connection from app.api.api import api_router from app.setup.service_context import initialize_llamaindex_service_context from app.setup.tracing import initialize_tracing_service load_dotenv() cwd = Path.cwd() # Default to 'development' if not set environment = os.getenv("ENVIRONMENT", "dev") @asynccontextmanager async def lifespan(app: FastAPI): # First wait for DB to be connectable. await check_database_connection() # Initialize pg vector store singleton. vector_store = await get_vector_store_singleton() vector_store = cast(CustomPGVectorStore, vector_store) await vector_store.run_setup() # Initialize firebase admin for authentication. cred = credentials.Certificate(cwd / 'firebase_creds.json') initialize_app(cred) # if environment == "dev": # # Initialize observability service. # initialize_tracing_service("wandb", "talking-resume") # Set global ServiceContext for LlamaIndex. initialize_llamaindex_service_context(environment) yield # This section is run on app shutdown. await vector_store.close() app = FastAPI(lifespan=lifespan) if environment == "dev": # LLM debug. llama_index.set_global_handler("simple") logger = logging.getLogger("uvicorn") logger.warning( "Running in development mode - allowing CORS for all origins") app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) app.include_router(api_router, prefix="/api") if __name__ == "__main__": uvicorn.run(app="main:app", host="0.0.0.0", reload=True)
[ "llama_index.set_global_handler" ]
[((620, 633), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (631, 633), False, 'from dotenv import load_dotenv\n'), ((641, 651), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (649, 651), False, 'from pathlib import Path\n'), ((705, 736), 'os.getenv', 'os.getenv', (['"""ENVIRONMENT"""', '"""dev"""'], {}), "('ENVIRONMENT', 'dev')\n", (714, 736), False, 'import os\n'), ((1549, 1575), 'fastapi.FastAPI', 'FastAPI', ([], {'lifespan': 'lifespan'}), '(lifespan=lifespan)\n', (1556, 1575), False, 'from fastapi import FastAPI\n'), ((993, 1032), 'typing.cast', 'cast', (['CustomPGVectorStore', 'vector_store'], {}), '(CustomPGVectorStore, vector_store)\n', (997, 1032), False, 'from typing import cast\n'), ((1132, 1184), 'firebase_admin.credentials.Certificate', 'credentials.Certificate', (["(cwd / 'firebase_creds.json')"], {}), "(cwd / 'firebase_creds.json')\n", (1155, 1184), False, 'from firebase_admin import credentials, initialize_app\n'), ((1189, 1209), 'firebase_admin.initialize_app', 'initialize_app', (['cred'], {}), '(cred)\n', (1203, 1209), False, 'from firebase_admin import credentials, initialize_app\n'), ((1405, 1455), 'app.setup.service_context.initialize_llamaindex_service_context', 'initialize_llamaindex_service_context', (['environment'], {}), '(environment)\n', (1442, 1455), False, 'from app.setup.service_context import initialize_llamaindex_service_context\n'), ((1623, 1663), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (1653, 1663), False, 'import llama_index\n'), ((1678, 1706), 'logging.getLogger', 'logging.getLogger', (['"""uvicorn"""'], {}), "('uvicorn')\n", (1695, 1706), False, 'import logging\n'), ((2051, 2107), 'uvicorn.run', 'uvicorn.run', ([], {'app': '"""main:app"""', 'host': '"""0.0.0.0"""', 'reload': '(True)'}), "(app='main:app', host='0.0.0.0', reload=True)\n", (2062, 2107), False, 'import uvicorn\n'), ((847, 874), 'app.db.wait_for_db.check_database_connection', 'check_database_connection', ([], {}), '()\n', (872, 874), False, 'from app.db.wait_for_db import check_database_connection\n'), ((945, 973), 'app.db.pg_vector.get_vector_store_singleton', 'get_vector_store_singleton', ([], {}), '()\n', (971, 973), False, 'from app.db.pg_vector import CustomPGVectorStore, get_vector_store_singleton\n')]
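A minimal, self-contained sketch of the lifespan pattern the application above is built around: resources are created before the yield (startup) and released after it (shutdown). The resource dictionary and the health route are placeholders, not part of the original app.

# Minimal lifespan sketch; the resource dict and /health route are illustrative only.
from contextlib import asynccontextmanager

import uvicorn
from fastapi import FastAPI


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup: stand-in for the DB checks and vector-store setup done above.
    app.state.resources = {"ready": True}
    yield
    # Shutdown: stand-in for closing connections.
    app.state.resources.clear()


app = FastAPI(lifespan=lifespan)


@app.get("/health")
async def health():
    return {"ok": app.state.resources["ready"]}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)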
#!/usr/bin/env python
import os, sys

print("[INFO] Python", sys.version)
if "VIRTUAL_ENV" in os.environ:
    print("[INFO] venv:", os.environ["VIRTUAL_ENV"])
if sys.version_info.major != 3 or sys.version_info.minor not in (8, 9, 10, 11):
    print("[WARNING] Unsupported python version!")

print("[INFO] Testing imports...")
try:
    import llama_index, jupyterlab, loguru
except ImportError:
    print("[ERROR] /!\\ Could not import some requirements, make sure you've installed everything "
          "according to README.md")
    print("[INFO] python path set to:", sys.path)
    raise

print("[INFO] OK. Loading model...")
service_context = llama_index.ServiceContext.from_defaults(
    embed_model="local:sentence-transformers/all-minilm-l6-v2",
    chunk_size=256,
    llm=None
)

print("[INFO] OK. Testing model...")
service_context.embed_model.get_text_embedding('Sphinx of black quartz, judge my vow')
print("All OK!")
[ "llama_index.ServiceContext.from_defaults" ]
[((644, 775), 'llama_index.ServiceContext.from_defaults', 'llama_index.ServiceContext.from_defaults', ([], {'embed_model': '"""local:sentence-transformers/all-minilm-l6-v2"""', 'chunk_size': '(256)', 'llm': 'None'}), "(embed_model=\n 'local:sentence-transformers/all-minilm-l6-v2', chunk_size=256, llm=None)\n", (684, 775), False, 'import llama_index, jupyterlab, loguru\n')]
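A follow-on sketch for the smoke test above: embed two sentences with the same locally configured model and compare them with a hand-rolled cosine similarity, so the only assumption beyond the record's own call is that get_text_embedding returns a plain list of floats.

# Embedding comparison sketch; reuses the exact ServiceContext call from the script above.
import math

import llama_index

service_context = llama_index.ServiceContext.from_defaults(
    embed_model="local:sentence-transformers/all-minilm-l6-v2",
    chunk_size=256,
    llm=None,
)
embed_model = service_context.embed_model

v1 = embed_model.get_text_embedding("The quick brown fox jumps over the lazy dog")
v2 = embed_model.get_text_embedding("A fast auburn fox leaps over a sleepy hound")

# Cosine similarity computed by hand so no extra numeric libraries are needed.
dot = sum(a * b for a, b in zip(v1, v2))
norm = math.sqrt(sum(a * a for a in v1)) * math.sqrt(sum(b * b for b in v2))
print("cosine similarity:", dot / norm)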
import logging from dataclasses import dataclass from typing import List, Optional import llama_index from llama_index.bridge.pydantic import BaseModel from llama_index.callbacks.base import CallbackManager from llama_index.embeddings.base import BaseEmbedding from llama_index.embeddings.utils import EmbedType, resolve_embed_model from llama_index.indices.prompt_helper import PromptHelper from llama_index.llm_predictor import LLMPredictor from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata from llama_index.llms.base import LLM from llama_index.llms.utils import LLMType, resolve_llm from llama_index.logger import LlamaLogger from llama_index.node_parser.interface import NodeParser, TextSplitter from llama_index.node_parser.text.sentence import ( DEFAULT_CHUNK_SIZE, SENTENCE_CHUNK_OVERLAP, SentenceSplitter, ) from llama_index.prompts.base import BasePromptTemplate from llama_index.schema import TransformComponent from llama_index.types import PydanticProgramMode logger = logging.getLogger(__name__) def _get_default_node_parser( chunk_size: int = DEFAULT_CHUNK_SIZE, chunk_overlap: int = SENTENCE_CHUNK_OVERLAP, callback_manager: Optional[CallbackManager] = None, ) -> NodeParser: """Get default node parser.""" return SentenceSplitter( chunk_size=chunk_size, chunk_overlap=chunk_overlap, callback_manager=callback_manager or CallbackManager(), ) def _get_default_prompt_helper( llm_metadata: LLMMetadata, context_window: Optional[int] = None, num_output: Optional[int] = None, ) -> PromptHelper: """Get default prompt helper.""" if context_window is not None: llm_metadata.context_window = context_window if num_output is not None: llm_metadata.num_output = num_output return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata) class ServiceContextData(BaseModel): llm: dict llm_predictor: dict prompt_helper: dict embed_model: dict transformations: List[dict] @dataclass class ServiceContext: """Service Context container. The service context container is a utility container for LlamaIndex index and query classes. It contains the following: - llm_predictor: BaseLLMPredictor - prompt_helper: PromptHelper - embed_model: BaseEmbedding - node_parser: NodeParser - llama_logger: LlamaLogger (deprecated) - callback_manager: CallbackManager """ llm_predictor: BaseLLMPredictor prompt_helper: PromptHelper embed_model: BaseEmbedding transformations: List[TransformComponent] llama_logger: LlamaLogger callback_manager: CallbackManager @classmethod def from_defaults( cls, llm_predictor: Optional[BaseLLMPredictor] = None, llm: Optional[LLMType] = "default", prompt_helper: Optional[PromptHelper] = None, embed_model: Optional[EmbedType] = "default", node_parser: Optional[NodeParser] = None, text_splitter: Optional[TextSplitter] = None, transformations: Optional[List[TransformComponent]] = None, llama_logger: Optional[LlamaLogger] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, query_wrapper_prompt: Optional[BasePromptTemplate] = None, # pydantic program mode (used if output_cls is specified) pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, # node parser kwargs chunk_size: Optional[int] = None, chunk_overlap: Optional[int] = None, # prompt helper kwargs context_window: Optional[int] = None, num_output: Optional[int] = None, # deprecated kwargs chunk_size_limit: Optional[int] = None, ) -> "ServiceContext": """Create a ServiceContext from defaults. 
If an argument is specified, then use the argument value provided for that parameter. If an argument is not specified, then use the default value. You can change the base defaults by setting llama_index.global_service_context to a ServiceContext object with your desired settings. Args: llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor prompt_helper (Optional[PromptHelper]): PromptHelper embed_model (Optional[BaseEmbedding]): BaseEmbedding or "local" (use local model) node_parser (Optional[NodeParser]): NodeParser llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated) chunk_size (Optional[int]): chunk_size callback_manager (Optional[CallbackManager]): CallbackManager system_prompt (Optional[str]): System-wide prompt to be prepended to all input prompts, used to guide system "decision making" query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap passed-in input queries. Deprecated Args: chunk_size_limit (Optional[int]): renamed to chunk_size """ if chunk_size_limit is not None and chunk_size is None: logger.warning( "chunk_size_limit is deprecated, please specify chunk_size instead" ) chunk_size = chunk_size_limit if llama_index.global_service_context is not None: return cls.from_service_context( llama_index.global_service_context, llm=llm, llm_predictor=llm_predictor, prompt_helper=prompt_helper, embed_model=embed_model, node_parser=node_parser, text_splitter=text_splitter, llama_logger=llama_logger, callback_manager=callback_manager, context_window=context_window, chunk_size=chunk_size, chunk_size_limit=chunk_size_limit, chunk_overlap=chunk_overlap, num_output=num_output, system_prompt=system_prompt, query_wrapper_prompt=query_wrapper_prompt, transformations=transformations, ) callback_manager = callback_manager or CallbackManager([]) if llm != "default": if llm_predictor is not None: raise ValueError("Cannot specify both llm and llm_predictor") llm = resolve_llm(llm) llm_predictor = llm_predictor or LLMPredictor( llm=llm, pydantic_program_mode=pydantic_program_mode ) if isinstance(llm_predictor, LLMPredictor): llm_predictor.llm.callback_manager = callback_manager if system_prompt: llm_predictor.system_prompt = system_prompt if query_wrapper_prompt: llm_predictor.query_wrapper_prompt = query_wrapper_prompt # NOTE: the embed_model isn't used in all indices # NOTE: embed model should be a transformation, but the way the service # context works, we can't put in there yet. 
embed_model = resolve_embed_model(embed_model) embed_model.callback_manager = callback_manager prompt_helper = prompt_helper or _get_default_prompt_helper( llm_metadata=llm_predictor.metadata, context_window=context_window, num_output=num_output, ) if text_splitter is not None and node_parser is not None: raise ValueError("Cannot specify both text_splitter and node_parser") node_parser = ( text_splitter # text splitter extends node parser or node_parser or _get_default_node_parser( chunk_size=chunk_size or DEFAULT_CHUNK_SIZE, chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP, callback_manager=callback_manager, ) ) transformations = transformations or [node_parser] llama_logger = llama_logger or LlamaLogger() return cls( llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper, transformations=transformations, llama_logger=llama_logger, # deprecated callback_manager=callback_manager, ) @classmethod def from_service_context( cls, service_context: "ServiceContext", llm_predictor: Optional[BaseLLMPredictor] = None, llm: Optional[LLMType] = "default", prompt_helper: Optional[PromptHelper] = None, embed_model: Optional[EmbedType] = "default", node_parser: Optional[NodeParser] = None, text_splitter: Optional[TextSplitter] = None, transformations: Optional[List[TransformComponent]] = None, llama_logger: Optional[LlamaLogger] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, query_wrapper_prompt: Optional[BasePromptTemplate] = None, # node parser kwargs chunk_size: Optional[int] = None, chunk_overlap: Optional[int] = None, # prompt helper kwargs context_window: Optional[int] = None, num_output: Optional[int] = None, # deprecated kwargs chunk_size_limit: Optional[int] = None, ) -> "ServiceContext": """Instantiate a new service context using a previous as the defaults.""" if chunk_size_limit is not None and chunk_size is None: logger.warning( "chunk_size_limit is deprecated, please specify chunk_size", DeprecationWarning, ) chunk_size = chunk_size_limit callback_manager = callback_manager or service_context.callback_manager if llm != "default": if llm_predictor is not None: raise ValueError("Cannot specify both llm and llm_predictor") llm = resolve_llm(llm) llm_predictor = LLMPredictor(llm=llm) llm_predictor = llm_predictor or service_context.llm_predictor if isinstance(llm_predictor, LLMPredictor): llm_predictor.llm.callback_manager = callback_manager if system_prompt: llm_predictor.system_prompt = system_prompt if query_wrapper_prompt: llm_predictor.query_wrapper_prompt = query_wrapper_prompt # NOTE: the embed_model isn't used in all indices # default to using the embed model passed from the service context if embed_model == "default": embed_model = service_context.embed_model embed_model = resolve_embed_model(embed_model) embed_model.callback_manager = callback_manager prompt_helper = prompt_helper or service_context.prompt_helper if context_window is not None or num_output is not None: prompt_helper = _get_default_prompt_helper( llm_metadata=llm_predictor.metadata, context_window=context_window, num_output=num_output, ) transformations = transformations or [] node_parser_found = False for transform in service_context.transformations: if isinstance(transform, NodeParser): node_parser_found = True node_parser = transform break if text_splitter is not None and node_parser is not None: raise ValueError("Cannot specify both text_splitter and node_parser") if not node_parser_found: node_parser = ( text_splitter # text splitter 
extends node parser or node_parser or _get_default_node_parser( chunk_size=chunk_size or DEFAULT_CHUNK_SIZE, chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP, callback_manager=callback_manager, ) ) transformations = transformations or service_context.transformations llama_logger = llama_logger or service_context.llama_logger return cls( llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper, transformations=transformations, llama_logger=llama_logger, # deprecated callback_manager=callback_manager, ) @property def llm(self) -> LLM: if not isinstance(self.llm_predictor, LLMPredictor): raise ValueError("llm_predictor must be an instance of LLMPredictor") return self.llm_predictor.llm @property def node_parser(self) -> NodeParser: """Get the node parser.""" for transform in self.transformations: if isinstance(transform, NodeParser): return transform raise ValueError("No node parser found.") def to_dict(self) -> dict: """Convert service context to dict.""" llm_dict = self.llm_predictor.llm.to_dict() llm_predictor_dict = self.llm_predictor.to_dict() embed_model_dict = self.embed_model.to_dict() prompt_helper_dict = self.prompt_helper.to_dict() tranform_list_dict = [x.to_dict() for x in self.transformations] return ServiceContextData( llm=llm_dict, llm_predictor=llm_predictor_dict, prompt_helper=prompt_helper_dict, embed_model=embed_model_dict, transformations=tranform_list_dict, ).dict() @classmethod def from_dict(cls, data: dict) -> "ServiceContext": from llama_index.embeddings.loading import load_embed_model from llama_index.extractors.loading import load_extractor from llama_index.llm_predictor.loading import load_predictor from llama_index.node_parser.loading import load_parser service_context_data = ServiceContextData.parse_obj(data) llm_predictor = load_predictor(service_context_data.llm_predictor) embed_model = load_embed_model(service_context_data.embed_model) prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper) transformations: List[TransformComponent] = [] for transform in service_context_data.transformations: try: transformations.append(load_parser(transform)) except ValueError: transformations.append(load_extractor(transform)) return cls.from_defaults( llm_predictor=llm_predictor, prompt_helper=prompt_helper, embed_model=embed_model, transformations=transformations, ) def set_global_service_context(service_context: Optional[ServiceContext]) -> None: """Helper function to set the global service context.""" llama_index.global_service_context = service_context
[ "llama_index.llm_predictor.loading.load_predictor", "llama_index.embeddings.loading.load_embed_model", "llama_index.logger.LlamaLogger", "llama_index.llms.utils.resolve_llm", "llama_index.node_parser.loading.load_parser", "llama_index.callbacks.base.CallbackManager", "llama_index.indices.prompt_helper.PromptHelper.from_dict", "llama_index.extractors.loading.load_extractor", "llama_index.llm_predictor.LLMPredictor", "llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata", "llama_index.embeddings.utils.resolve_embed_model" ]
[((1019, 1046), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1036, 1046), False, 'import logging\n'), ((1821, 1878), 'llama_index.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1851, 1878), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((7115, 7147), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (7134, 7147), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((10676, 10708), 'llama_index.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (10695, 10708), False, 'from llama_index.embeddings.utils import EmbedType, resolve_embed_model\n'), ((13993, 14043), 'llama_index.llm_predictor.loading.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {}), '(service_context_data.llm_predictor)\n', (14007, 14043), False, 'from llama_index.llm_predictor.loading import load_predictor\n'), ((14067, 14117), 'llama_index.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (14083, 14117), False, 'from llama_index.embeddings.loading import load_embed_model\n'), ((14143, 14201), 'llama_index.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (14165, 14201), False, 'from llama_index.indices.prompt_helper import PromptHelper\n'), ((6249, 6268), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6264, 6268), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((6436, 6452), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (6447, 6452), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((6494, 6560), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm', 'pydantic_program_mode': 'pydantic_program_mode'}), '(llm=llm, pydantic_program_mode=pydantic_program_mode)\n', (6506, 6560), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((8023, 8036), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (8034, 8036), False, 'from llama_index.logger import LlamaLogger\n'), ((9971, 9987), 'llama_index.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (9982, 9987), False, 'from llama_index.llms.utils import LLMType, resolve_llm\n'), ((10016, 10037), 'llama_index.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (10028, 10037), False, 'from llama_index.llm_predictor import LLMPredictor\n'), ((1420, 1437), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (1435, 1437), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((14377, 14399), 'llama_index.node_parser.loading.load_parser', 'load_parser', (['transform'], {}), '(transform)\n', (14388, 14399), False, 'from llama_index.node_parser.loading import load_parser\n'), ((14471, 14496), 'llama_index.extractors.loading.load_extractor', 'load_extractor', (['transform'], {}), '(transform)\n', (14485, 14496), False, 'from llama_index.extractors.loading import load_extractor\n')]
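A sketch of the to_dict()/from_dict() round trip this version of the module introduces. Root-level import paths and the availability of MockLLM and a local embedding model are assumptions; they keep the example free of API keys.

# Serialization round-trip sketch; import paths are assumptions about this release.
from llama_index import ServiceContext
from llama_index.llms import MockLLM

ctx = ServiceContext.from_defaults(llm=MockLLM(), embed_model="local", chunk_size=256)

# LLM, embed model, prompt helper and transformations all become plain dicts...
payload = ctx.to_dict()

# ...which can be shipped as JSON and rebuilt into an equivalent context elsewhere.
restored = ServiceContext.from_dict(payload)
print(type(restored.node_parser).__name__)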
import logging from dataclasses import dataclass from typing import Any, List, Optional, cast from deprecated import deprecated import llama_index.core from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.callbacks.base import CallbackManager from llama_index.core.base.embeddings.base import BaseEmbedding from llama_index.core.indices.prompt_helper import PromptHelper from llama_index.core.service_context_elements.llm_predictor import ( LLMPredictor, BaseLLMPredictor, ) from llama_index.core.llms.base import LLMMetadata from llama_index.core.llms.llm import LLM from llama_index.core.llms.utils import LLMType, resolve_llm from llama_index.core.service_context_elements.llama_logger import LlamaLogger from llama_index.core.node_parser.interface import NodeParser, TextSplitter from llama_index.core.node_parser.text.sentence import ( DEFAULT_CHUNK_SIZE, SENTENCE_CHUNK_OVERLAP, SentenceSplitter, ) from llama_index.core.prompts.base import BasePromptTemplate from llama_index.core.schema import TransformComponent from llama_index.core.types import PydanticProgramMode logger = logging.getLogger(__name__) def _get_default_node_parser( chunk_size: int = DEFAULT_CHUNK_SIZE, chunk_overlap: int = SENTENCE_CHUNK_OVERLAP, callback_manager: Optional[CallbackManager] = None, ) -> NodeParser: """Get default node parser.""" return SentenceSplitter( chunk_size=chunk_size, chunk_overlap=chunk_overlap, callback_manager=callback_manager or CallbackManager(), ) def _get_default_prompt_helper( llm_metadata: LLMMetadata, context_window: Optional[int] = None, num_output: Optional[int] = None, ) -> PromptHelper: """Get default prompt helper.""" if context_window is not None: llm_metadata.context_window = context_window if num_output is not None: llm_metadata.num_output = num_output return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata) class ServiceContextData(BaseModel): llm: dict llm_predictor: dict prompt_helper: dict embed_model: dict transformations: List[dict] @dataclass class ServiceContext: """Service Context container. The service context container is a utility container for LlamaIndex index and query classes. 
It contains the following: - llm_predictor: BaseLLMPredictor - prompt_helper: PromptHelper - embed_model: BaseEmbedding - node_parser: NodeParser - llama_logger: LlamaLogger (deprecated) - callback_manager: CallbackManager """ llm_predictor: BaseLLMPredictor prompt_helper: PromptHelper embed_model: BaseEmbedding transformations: List[TransformComponent] llama_logger: LlamaLogger callback_manager: CallbackManager @classmethod @deprecated( version="0.10.0", reason="ServiceContext is deprecated, please use `llama_index.settings.Settings` instead.", ) def from_defaults( cls, llm_predictor: Optional[BaseLLMPredictor] = None, llm: Optional[LLMType] = "default", prompt_helper: Optional[PromptHelper] = None, embed_model: Optional[Any] = "default", node_parser: Optional[NodeParser] = None, text_splitter: Optional[TextSplitter] = None, transformations: Optional[List[TransformComponent]] = None, llama_logger: Optional[LlamaLogger] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, query_wrapper_prompt: Optional[BasePromptTemplate] = None, # pydantic program mode (used if output_cls is specified) pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, # node parser kwargs chunk_size: Optional[int] = None, chunk_overlap: Optional[int] = None, # prompt helper kwargs context_window: Optional[int] = None, num_output: Optional[int] = None, # deprecated kwargs chunk_size_limit: Optional[int] = None, ) -> "ServiceContext": """Create a ServiceContext from defaults. If an argument is specified, then use the argument value provided for that parameter. If an argument is not specified, then use the default value. You can change the base defaults by setting llama_index.global_service_context to a ServiceContext object with your desired settings. Args: llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor prompt_helper (Optional[PromptHelper]): PromptHelper embed_model (Optional[BaseEmbedding]): BaseEmbedding or "local" (use local model) node_parser (Optional[NodeParser]): NodeParser llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated) chunk_size (Optional[int]): chunk_size callback_manager (Optional[CallbackManager]): CallbackManager system_prompt (Optional[str]): System-wide prompt to be prepended to all input prompts, used to guide system "decision making" query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap passed-in input queries. 
Deprecated Args: chunk_size_limit (Optional[int]): renamed to chunk_size """ from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model embed_model = cast(EmbedType, embed_model) if chunk_size_limit is not None and chunk_size is None: logger.warning( "chunk_size_limit is deprecated, please specify chunk_size instead" ) chunk_size = chunk_size_limit if llama_index.core.global_service_context is not None: return cls.from_service_context( llama_index.core.global_service_context, llm=llm, llm_predictor=llm_predictor, prompt_helper=prompt_helper, embed_model=embed_model, node_parser=node_parser, text_splitter=text_splitter, llama_logger=llama_logger, callback_manager=callback_manager, context_window=context_window, chunk_size=chunk_size, chunk_size_limit=chunk_size_limit, chunk_overlap=chunk_overlap, num_output=num_output, system_prompt=system_prompt, query_wrapper_prompt=query_wrapper_prompt, transformations=transformations, ) callback_manager = callback_manager or CallbackManager([]) if llm != "default": if llm_predictor is not None: raise ValueError("Cannot specify both llm and llm_predictor") llm = resolve_llm(llm) llm.system_prompt = llm.system_prompt or system_prompt llm.query_wrapper_prompt = llm.query_wrapper_prompt or query_wrapper_prompt llm.pydantic_program_mode = ( llm.pydantic_program_mode or pydantic_program_mode ) if llm_predictor is not None: print("LLMPredictor is deprecated, please use LLM instead.") llm_predictor = llm_predictor or LLMPredictor( llm=llm, pydantic_program_mode=pydantic_program_mode ) if isinstance(llm_predictor, LLMPredictor): llm_predictor.llm.callback_manager = callback_manager if system_prompt: llm_predictor.system_prompt = system_prompt if query_wrapper_prompt: llm_predictor.query_wrapper_prompt = query_wrapper_prompt # NOTE: the embed_model isn't used in all indices # NOTE: embed model should be a transformation, but the way the service # context works, we can't put in there yet. 
embed_model = resolve_embed_model(embed_model) embed_model.callback_manager = callback_manager prompt_helper = prompt_helper or _get_default_prompt_helper( llm_metadata=llm_predictor.metadata, context_window=context_window, num_output=num_output, ) if text_splitter is not None and node_parser is not None: raise ValueError("Cannot specify both text_splitter and node_parser") node_parser = ( text_splitter # text splitter extends node parser or node_parser or _get_default_node_parser( chunk_size=chunk_size or DEFAULT_CHUNK_SIZE, chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP, callback_manager=callback_manager, ) ) transformations = transformations or [node_parser] llama_logger = llama_logger or LlamaLogger() return cls( llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper, transformations=transformations, llama_logger=llama_logger, # deprecated callback_manager=callback_manager, ) @classmethod def from_service_context( cls, service_context: "ServiceContext", llm_predictor: Optional[BaseLLMPredictor] = None, llm: Optional[LLMType] = "default", prompt_helper: Optional[PromptHelper] = None, embed_model: Optional[Any] = "default", node_parser: Optional[NodeParser] = None, text_splitter: Optional[TextSplitter] = None, transformations: Optional[List[TransformComponent]] = None, llama_logger: Optional[LlamaLogger] = None, callback_manager: Optional[CallbackManager] = None, system_prompt: Optional[str] = None, query_wrapper_prompt: Optional[BasePromptTemplate] = None, # node parser kwargs chunk_size: Optional[int] = None, chunk_overlap: Optional[int] = None, # prompt helper kwargs context_window: Optional[int] = None, num_output: Optional[int] = None, # deprecated kwargs chunk_size_limit: Optional[int] = None, ) -> "ServiceContext": """Instantiate a new service context using a previous as the defaults.""" from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model embed_model = cast(EmbedType, embed_model) if chunk_size_limit is not None and chunk_size is None: logger.warning( "chunk_size_limit is deprecated, please specify chunk_size", DeprecationWarning, ) chunk_size = chunk_size_limit callback_manager = callback_manager or service_context.callback_manager if llm != "default": if llm_predictor is not None: raise ValueError("Cannot specify both llm and llm_predictor") llm = resolve_llm(llm) llm_predictor = LLMPredictor(llm=llm) llm_predictor = llm_predictor or service_context.llm_predictor if isinstance(llm_predictor, LLMPredictor): llm_predictor.llm.callback_manager = callback_manager if system_prompt: llm_predictor.system_prompt = system_prompt if query_wrapper_prompt: llm_predictor.query_wrapper_prompt = query_wrapper_prompt # NOTE: the embed_model isn't used in all indices # default to using the embed model passed from the service context if embed_model == "default": embed_model = service_context.embed_model embed_model = resolve_embed_model(embed_model) embed_model.callback_manager = callback_manager prompt_helper = prompt_helper or service_context.prompt_helper if context_window is not None or num_output is not None: prompt_helper = _get_default_prompt_helper( llm_metadata=llm_predictor.metadata, context_window=context_window, num_output=num_output, ) transformations = transformations or [] node_parser_found = False for transform in service_context.transformations: if isinstance(transform, NodeParser): node_parser_found = True node_parser = transform break if text_splitter is not None and node_parser is not None: raise ValueError("Cannot 
specify both text_splitter and node_parser") if not node_parser_found: node_parser = ( text_splitter # text splitter extends node parser or node_parser or _get_default_node_parser( chunk_size=chunk_size or DEFAULT_CHUNK_SIZE, chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP, callback_manager=callback_manager, ) ) transformations = transformations or service_context.transformations llama_logger = llama_logger or service_context.llama_logger return cls( llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper, transformations=transformations, llama_logger=llama_logger, # deprecated callback_manager=callback_manager, ) @property def llm(self) -> LLM: return self.llm_predictor.llm @property def node_parser(self) -> NodeParser: """Get the node parser.""" for transform in self.transformations: if isinstance(transform, NodeParser): return transform raise ValueError("No node parser found.") def to_dict(self) -> dict: """Convert service context to dict.""" llm_dict = self.llm_predictor.llm.to_dict() llm_predictor_dict = self.llm_predictor.to_dict() embed_model_dict = self.embed_model.to_dict() prompt_helper_dict = self.prompt_helper.to_dict() tranform_list_dict = [x.to_dict() for x in self.transformations] return ServiceContextData( llm=llm_dict, llm_predictor=llm_predictor_dict, prompt_helper=prompt_helper_dict, embed_model=embed_model_dict, transformations=tranform_list_dict, ).dict() @classmethod def from_dict(cls, data: dict) -> "ServiceContext": from llama_index.core.embeddings.loading import load_embed_model from llama_index.core.extractors.loading import load_extractor from llama_index.core.node_parser.loading import load_parser from llama_index.core.service_context_elements.llm_predictor import ( load_predictor, ) service_context_data = ServiceContextData.parse_obj(data) llm_predictor = load_predictor(service_context_data.llm_predictor) embed_model = load_embed_model(service_context_data.embed_model) prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper) transformations: List[TransformComponent] = [] for transform in service_context_data.transformations: try: transformations.append(load_parser(transform)) except ValueError: transformations.append(load_extractor(transform)) return cls.from_defaults( llm_predictor=llm_predictor, prompt_helper=prompt_helper, embed_model=embed_model, transformations=transformations, ) def set_global_service_context(service_context: Optional[ServiceContext]) -> None: """Helper function to set the global service context.""" llama_index.core.global_service_context = service_context if service_context is not None: from llama_index.core.settings import Settings Settings.llm = service_context.llm Settings.embed_model = service_context.embed_model Settings.prompt_helper = service_context.prompt_helper Settings.transformations = service_context.transformations Settings.node_parser = service_context.node_parser Settings.callback_manager = service_context.callback_manager
[ "llama_index.core.embeddings.utils.resolve_embed_model", "llama_index.core.node_parser.loading.load_parser", "llama_index.core.extractors.loading.load_extractor", "llama_index.core.callbacks.base.CallbackManager", "llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata", "llama_index.core.service_context_elements.llm_predictor.load_predictor", "llama_index.core.service_context_elements.llama_logger.LlamaLogger", "llama_index.core.llms.utils.resolve_llm", "llama_index.core.service_context_elements.llm_predictor.LLMPredictor", "llama_index.core.embeddings.loading.load_embed_model", "llama_index.core.indices.prompt_helper.PromptHelper.from_dict" ]
[((1132, 1159), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1149, 1159), False, 'import logging\n'), ((1934, 1991), 'llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1964, 1991), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n'), ((2811, 2941), 'deprecated.deprecated', 'deprecated', ([], {'version': '"""0.10.0"""', 'reason': '"""ServiceContext is deprecated, please use `llama_index.settings.Settings` instead."""'}), "(version='0.10.0', reason=\n 'ServiceContext is deprecated, please use `llama_index.settings.Settings` instead.'\n )\n", (2821, 2941), False, 'from deprecated import deprecated\n'), ((5452, 5480), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (5456, 5480), False, 'from typing import Any, List, Optional, cast\n'), ((7909, 7941), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (7928, 7941), False, 'from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model\n'), ((10358, 10386), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (10362, 10386), False, 'from typing import Any, List, Optional, cast\n'), ((11602, 11634), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (11621, 11634), False, 'from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model\n'), ((14838, 14888), 'llama_index.core.service_context_elements.llm_predictor.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {}), '(service_context_data.llm_predictor)\n', (14852, 14888), False, 'from llama_index.core.service_context_elements.llm_predictor import load_predictor\n'), ((14912, 14962), 'llama_index.core.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (14928, 14962), False, 'from llama_index.core.embeddings.loading import load_embed_model\n'), ((14988, 15046), 'llama_index.core.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (15010, 15046), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n'), ((6653, 6672), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6668, 6672), False, 'from llama_index.core.callbacks.base import CallbackManager\n'), ((6840, 6856), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (6851, 6856), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((7288, 7354), 'llama_index.core.service_context_elements.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm', 'pydantic_program_mode': 'pydantic_program_mode'}), '(llm=llm, pydantic_program_mode=pydantic_program_mode)\n', (7300, 7354), False, 'from llama_index.core.service_context_elements.llm_predictor import LLMPredictor, BaseLLMPredictor\n'), ((8817, 8830), 'llama_index.core.service_context_elements.llama_logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (8828, 8830), False, 'from llama_index.core.service_context_elements.llama_logger import LlamaLogger\n'), ((10897, 10913), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', 
(['llm'], {}), '(llm)\n', (10908, 10913), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((10942, 10963), 'llama_index.core.service_context_elements.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (10954, 10963), False, 'from llama_index.core.service_context_elements.llm_predictor import LLMPredictor, BaseLLMPredictor\n'), ((1533, 1550), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (1548, 1550), False, 'from llama_index.core.callbacks.base import CallbackManager\n'), ((15222, 15244), 'llama_index.core.node_parser.loading.load_parser', 'load_parser', (['transform'], {}), '(transform)\n', (15233, 15244), False, 'from llama_index.core.node_parser.loading import load_parser\n'), ((15316, 15341), 'llama_index.core.extractors.loading.load_extractor', 'load_extractor', (['transform'], {}), '(transform)\n', (15330, 15341), False, 'from llama_index.core.extractors.loading import load_extractor\n')]
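This llama_index.core variant deprecates ServiceContext in favor of the global Settings object, and set_global_service_context simply copies its fields onto Settings. Below is a sketch of configuring Settings directly, touching only the attributes the code above assigns; the shortened import paths are assumptions.

# Configure the global Settings object instead of building a ServiceContext.
# Shortened import paths are assumptions; attribute names come from the code above.
from llama_index.core.callbacks import CallbackManager
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.settings import Settings

Settings.node_parser = SentenceSplitter(chunk_size=512, chunk_overlap=64)
Settings.callback_manager = CallbackManager([])
# Settings.llm and Settings.embed_model accept an LLM / BaseEmbedding instance in the
# same way; fields left untouched keep the library defaults.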
import itertools import logging from os import path from typing import List, Sequence import llama_index.vector_stores import ray from kfp import compiler, dsl from langchain.embeddings.fake import FakeEmbeddings from llama_index import ServiceContext, StorageContext, VectorStoreIndex from llama_index.data_structs import IndexDict from llama_index.llms import MockLLM import vectorize_fileset logging.basicConfig(level=logging.INFO) ## The default concurrency for the number of concurrent ## ray tasks DEFAULT_CPU_CONCURRENCY = 150 DEFAULT_GPU_CONCURRENCY = 10 ## The largest number of tasks we'll wait for at a time READY_BATCH_SIZE = 1 def get_fs(url: str): import fsspec return fsspec.filesystem(url.split("://", 1)[0]) def url_as_path(url: str) -> str: """Converts a URL to a path.""" return url.split("://", 1)[-1] def persist_nodes(nodes: List, vectordb_cls: str, vectordb_kwargs: dict): if vectordb_cls is None: logging.warn("Unable to persist nodes, there is no vector store specified") return if len(nodes) == 0: return cls = getattr(llama_index.vector_stores, vectordb_cls) vectordb_kwargs["dim"] = len(nodes[0].embedding) vector_store = cls(**vectordb_kwargs) service_context = ServiceContext.from_defaults( llm=MockLLM(), embed_model=FakeEmbeddings(size=len(nodes[0].embedding)) ) storage_context = StorageContext.from_defaults(vector_store=vector_store) vector_store_index = VectorStoreIndex( storage_context=storage_context, index_struct=IndexDict(), service_context=service_context, ) logging.info(f"Persisting {len(nodes)} nodes to vector store") vector_store_index.insert_nodes(nodes) def partition(lst: Sequence, size: int): for i in range(0, len(lst), size): yield lst[i : i + size] def ray_vectorize_dataset( ray_address: str, root_uri: str, files: List[str], batch_size=1000, vectordb_cls: str = None, vectordb_kwargs: dict = None, concurrency: int = None, use_gpu: bool = False, ): runtime_env = { "working_dir": ".", "py_modules": [vectorize_fileset], "conda": { "dependencies": [ "pip", { "pip": [ "gcsfs~=2023.9", "s3fs~=2023.9", "fsspec~=2023.9", "llama_index~=0.8.29", "langchain~=0.0.298", "sentence-transformers~=2.2", "nltk", ] }, ], }, } ray.init(address=ray_address, runtime_env=runtime_env) num_cpus = 2 if not use_gpu else 1 num_gpus = 1 if use_gpu else 0 ## Make remote versions of the functions we'll need remote_vectorize_fileset = ray.remote(vectorize_fileset.vectorize_fileset) remote_vectorize_fileset = remote_vectorize_fileset.options( num_cpus=num_cpus, num_gpus=num_gpus ) if concurrency is None: concurrency = DEFAULT_GPU_CONCURRENCY if use_gpu else DEFAULT_CPU_CONCURRENCY ## Partition the file lists into batches and submit them to ray result_refs = [] for p in partition(files, size=batch_size): results = None if len(result_refs) >= concurrency: ready_refs, result_refs = ray.wait( result_refs, num_returns=min(READY_BATCH_SIZE, len(result_refs)) ) results = ray.get(ready_refs) result_refs.append(remote_vectorize_fileset.remote(root_uri, p)) if results: persist_nodes( list(itertools.chain(*results)), vectordb_cls=vectordb_cls, vectordb_kwargs=vectordb_kwargs, ) while result_refs: ready_refs, result_refs = ray.wait( result_refs, num_returns=min(READY_BATCH_SIZE, len(result_refs)) ) results = ray.get(ready_refs) persist_nodes( list(itertools.chain(*results)), vectordb_cls=vectordb_cls, vectordb_kwargs=vectordb_kwargs, ) @dsl.component( target_image="us-central1-docker.pkg.dev/kflow-artifacts/kfp-components/kfp-vectorize-dataset:latest", base_image="python:3.10-slim", packages_to_install=[ 
"ray[client]~=2.7", "gcsfs~=2023.9", "s3fs~=2023.9", "fsspec~=2023.9", "llama_index~=0.8.29", "pymilvus~=2.3", ], ) def vectorize_dataset( dataset_url: str, vectordb_cls: str, vectordb_kwargs: dict, ray_address: str, batch_size: int, concurrency: int, use_gpu: bool, ): """ Vectorizes each file ina dataset and persists them to a datastore If `ray_address` is provided, then the component will use ray tasks to vectorize batches of the files in parallel. Otherwise, it will vectorize the files sequentially. Args: dataset_url: The URL of the dataset to vectorize. This should be a directory of separate documents. All files in the directory and any subdirectory will be vectorized. The URL should be in the form of a supported fsspec URL (e.g. `gs://` for Google Cloud Storage, `s3://` for S3, etc.) vectordb_cls: The class of the vector store to persist the vectors to. This should be a class from `llama_index.vector_stores`. If `None`, then the vectors will not be persisted. vectordb_kwargs: The keyword arguments to pass to the vector store class constructor. ray_address: The address of the ray cluster to use for parallelization. If `None`, then the files will be vectorized sequentially. batch_size: The number of files to vectorize in each batch. This is only used if `ray_address` is provided. concurrency: The maximum number of concurrent ray tasks to run. This is only used if `ray_address` is provided. """ fs = get_fs(dataset_url) dataset_path = url_as_path(dataset_url) dataset_path = dataset_path.rstrip("/") + "/" ## Ensure the path ends with a slash all_files = list( itertools.chain( *[ [path.join(dirpath.replace(dataset_path, ""), f) for f in files] for dirpath, _, files in fs.walk(dataset_path) ] ) ) if ray_address is not None: ray_vectorize_dataset( ray_address, dataset_url, all_files, vectordb_cls=vectordb_cls, vectordb_kwargs=vectordb_kwargs, batch_size=batch_size, concurrency=concurrency, use_gpu=use_gpu, ) else: nodes = vectorize_fileset(dataset_url, all_files) persist_nodes(nodes, vectordb_cls=vectordb_cls, vectordb_kwargs=vectordb_kwargs) if __name__ == "__main__": compiler.Compiler().compile( vectorize_dataset, path.join(path.dirname(__file__), "..", "component.yaml") )
[ "llama_index.data_structs.IndexDict", "llama_index.llms.MockLLM", "llama_index.StorageContext.from_defaults" ]
[((397, 436), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (416, 436), False, 'import logging\n'), ((4202, 4501), 'kfp.dsl.component', 'dsl.component', ([], {'target_image': '"""us-central1-docker.pkg.dev/kflow-artifacts/kfp-components/kfp-vectorize-dataset:latest"""', 'base_image': '"""python:3.10-slim"""', 'packages_to_install': "['ray[client]~=2.7', 'gcsfs~=2023.9', 's3fs~=2023.9', 'fsspec~=2023.9',\n 'llama_index~=0.8.29', 'pymilvus~=2.3']"}), "(target_image=\n 'us-central1-docker.pkg.dev/kflow-artifacts/kfp-components/kfp-vectorize-dataset:latest'\n , base_image='python:3.10-slim', packages_to_install=[\n 'ray[client]~=2.7', 'gcsfs~=2023.9', 's3fs~=2023.9', 'fsspec~=2023.9',\n 'llama_index~=0.8.29', 'pymilvus~=2.3'])\n", (4215, 4501), False, 'from kfp import compiler, dsl\n'), ((1410, 1465), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1438, 1465), False, 'from llama_index import ServiceContext, StorageContext, VectorStoreIndex\n'), ((2680, 2734), 'ray.init', 'ray.init', ([], {'address': 'ray_address', 'runtime_env': 'runtime_env'}), '(address=ray_address, runtime_env=runtime_env)\n', (2688, 2734), False, 'import ray\n'), ((2898, 2945), 'ray.remote', 'ray.remote', (['vectorize_fileset.vectorize_fileset'], {}), '(vectorize_fileset.vectorize_fileset)\n', (2908, 2945), False, 'import ray\n'), ((962, 1037), 'logging.warn', 'logging.warn', (['"""Unable to persist nodes, there is no vector store specified"""'], {}), "('Unable to persist nodes, there is no vector store specified')\n", (974, 1037), False, 'import logging\n'), ((4017, 4036), 'ray.get', 'ray.get', (['ready_refs'], {}), '(ready_refs)\n', (4024, 4036), False, 'import ray\n'), ((6788, 6829), 'vectorize_fileset', 'vectorize_fileset', (['dataset_url', 'all_files'], {}), '(dataset_url, all_files)\n', (6805, 6829), False, 'import vectorize_fileset\n'), ((1314, 1323), 'llama_index.llms.MockLLM', 'MockLLM', ([], {}), '()\n', (1321, 1323), False, 'from llama_index.llms import MockLLM\n'), ((1571, 1582), 'llama_index.data_structs.IndexDict', 'IndexDict', ([], {}), '()\n', (1580, 1582), False, 'from llama_index.data_structs import IndexDict\n'), ((3548, 3567), 'ray.get', 'ray.get', (['ready_refs'], {}), '(ready_refs)\n', (3555, 3567), False, 'import ray\n'), ((6952, 6971), 'kfp.compiler.Compiler', 'compiler.Compiler', ([], {}), '()\n', (6969, 6971), False, 'from kfp import compiler, dsl\n'), ((7018, 7040), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (7030, 7040), False, 'from os import path\n'), ((4077, 4102), 'itertools.chain', 'itertools.chain', (['*results'], {}), '(*results)\n', (4092, 4102), False, 'import itertools\n'), ((3710, 3735), 'itertools.chain', 'itertools.chain', (['*results'], {}), '(*results)\n', (3725, 3735), False, 'import itertools\n')]
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Module for configuring objects used to create OpenTelemetry traces."""

import os

from opentelemetry import trace, context
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
from opentelemetry.propagate import set_global_textmap, get_global_textmap
from opentelemetry.propagators.composite import CompositePropagator
from tools.observability.llamaindex import opentelemetry_callback
import llama_index
from llama_index.callbacks.base import CallbackManager
from functools import wraps

# Configure tracer used by the Chain Server to create spans
resource = Resource.create({SERVICE_NAME: "chain-server"})
provider = TracerProvider(resource=resource)
if os.environ.get("ENABLE_TRACING") == "true":
    processor = SimpleSpanProcessor(OTLPSpanExporter())
    provider.add_span_processor(processor)
trace.set_tracer_provider(provider)
tracer = trace.get_tracer("chain-server")

# Configure Propagator used for processing trace context received by the Chain Server
if os.environ.get("ENABLE_TRACING") == "true":
    propagator = TraceContextTextMapPropagator()
    # Llamaindex global handler set to pass callbacks into the OpenTelemetry handler
    llama_index.global_handler = opentelemetry_callback.OpenTelemetryCallbackHandler(tracer)
else:
    propagator = CompositePropagator([])  # No-op propagator
set_global_textmap(propagator)


# Wrapper Function to perform instrumentation
def instrumentation_wrapper(func):
    @wraps(func)
    async def wrapper(*args, **kwargs):
        request = kwargs.get("request")
        prompt = kwargs.get("prompt")
        ctx = get_global_textmap().extract(request.headers)
        if ctx is not None:
            context.attach(ctx)
        if prompt is not None and prompt.use_knowledge_base == False:
            # Hack to get the LLM event for no knowledge base queries to show up.
            # A trace is not generated by Llamaindex for these calls so we need to generate it instead.
            callback_manager = CallbackManager([])
            with callback_manager.as_trace("query"):
                result = func(*args, **kwargs)
        else:
            result = func(*args, **kwargs)
        return await result

    return wrapper
[ "llama_index.callbacks.base.CallbackManager" ]
[((1536, 1583), 'opentelemetry.sdk.resources.Resource.create', 'Resource.create', (["{SERVICE_NAME: 'chain-server'}"], {}), "({SERVICE_NAME: 'chain-server'})\n", (1551, 1583), False, 'from opentelemetry.sdk.resources import SERVICE_NAME, Resource\n'), ((1595, 1628), 'opentelemetry.sdk.trace.TracerProvider', 'TracerProvider', ([], {'resource': 'resource'}), '(resource=resource)\n', (1609, 1628), False, 'from opentelemetry.sdk.trace import TracerProvider\n'), ((1775, 1810), 'opentelemetry.trace.set_tracer_provider', 'trace.set_tracer_provider', (['provider'], {}), '(provider)\n', (1800, 1810), False, 'from opentelemetry import trace, context\n'), ((1820, 1852), 'opentelemetry.trace.get_tracer', 'trace.get_tracer', (['"""chain-server"""'], {}), "('chain-server')\n", (1836, 1852), False, 'from opentelemetry import trace, context\n'), ((2280, 2310), 'opentelemetry.propagate.set_global_textmap', 'set_global_textmap', (['propagator'], {}), '(propagator)\n', (2298, 2310), False, 'from opentelemetry.propagate import set_global_textmap, get_global_textmap\n'), ((1632, 1664), 'os.environ.get', 'os.environ.get', (['"""ENABLE_TRACING"""'], {}), "('ENABLE_TRACING')\n", (1646, 1664), False, 'import os\n'), ((1943, 1975), 'os.environ.get', 'os.environ.get', (['"""ENABLE_TRACING"""'], {}), "('ENABLE_TRACING')\n", (1957, 1975), False, 'import os\n'), ((2004, 2035), 'opentelemetry.trace.propagation.tracecontext.TraceContextTextMapPropagator', 'TraceContextTextMapPropagator', ([], {}), '()\n', (2033, 2035), False, 'from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator\n'), ((2154, 2213), 'tools.observability.llamaindex.opentelemetry_callback.OpenTelemetryCallbackHandler', 'opentelemetry_callback.OpenTelemetryCallbackHandler', (['tracer'], {}), '(tracer)\n', (2205, 2213), False, 'from tools.observability.llamaindex import opentelemetry_callback\n'), ((2237, 2260), 'opentelemetry.propagators.composite.CompositePropagator', 'CompositePropagator', (['[]'], {}), '([])\n', (2256, 2260), False, 'from opentelemetry.propagators.composite import CompositePropagator\n'), ((2398, 2409), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (2403, 2409), False, 'from functools import wraps\n'), ((1712, 1730), 'opentelemetry.exporter.otlp.proto.grpc.trace_exporter.OTLPSpanExporter', 'OTLPSpanExporter', ([], {}), '()\n', (1728, 1730), False, 'from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter\n'), ((2628, 2647), 'opentelemetry.context.attach', 'context.attach', (['ctx'], {}), '(ctx)\n', (2642, 2647), False, 'from opentelemetry import trace, context\n'), ((2935, 2954), 'llama_index.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (2950, 2954), False, 'from llama_index.callbacks.base import CallbackManager\n'), ((2542, 2562), 'opentelemetry.propagate.get_global_textmap', 'get_global_textmap', ([], {}), '()\n', (2560, 2562), False, 'from opentelemetry.propagate import set_global_textmap, get_global_textmap\n')]
import json
import os
import time

import fitz  # PyMuPDF
import llama_index
import openai
import weaviate
from weaviate.gql.get import HybridFusion
from unstructured.cleaners.core import clean
from llama_index.vector_stores import WeaviateVectorStore
from llama_index import VectorStoreIndex, ServiceContext, set_global_service_context
from llama_index.response.pprint_utils import pprint_source_node

import os
import weaviate
from langchain.document_loaders import GutenbergLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Weaviate

# Grimms' Fairy Tales by Jacob Grimm and Wilhelm Grimm
loader = GutenbergLoader("https://www.gutenberg.org/files/2591/2591-0.txt")
documents = loader.load()

text_splitter = CharacterTextSplitter(
    chunk_size=500, chunk_overlap=0, length_function=len
)
docs = text_splitter.split_documents(documents)

WEAVIATE_URL = "http://weaviate:8080"
client = weaviate.Client(
    url=WEAVIATE_URL,
    additional_headers={"X-OpenAI-Api-Key": os.environ["OPENAI_API_KEY"]},
)

client.schema.delete_all()
client.schema.get()
schema = {
    "classes": [
        {
            "class": "Test",
            "description": "A written paragraph",
            "vectorizer": "text2vec-openai",
            "moduleConfig": {"text2vec-openai": {"model": "ada", "type": "text"}},
        },
    ]
}
client.schema.create(schema)

vectorstore = Weaviate(client, "Paragraph", "content")

text_meta_pair = [(doc.page_content, doc.metadata) for doc in docs]
texts, meta = list(zip(*text_meta_pair))
vectorstore.add_texts(texts, meta)

query = "the part where with talking animals"
docs = vectorstore.similarity_search(query)

for doc in docs:
    print(doc.page_content)
    print("*" * 80)

azure_openai_key = os.getenv("AZURE_OPENAI_KEY")
azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
resource_name = os.getenv("RESOURCE_NAME")

azure_client = openai.lib.azure.AzureOpenAI(
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    api_key=os.getenv("AZURE_OPENAI_KEY"),
    api_version="2023-05-15"
)

headers = {
    "X-Azure-Api-Key": azure_openai_key,
}


def query_openai(messages):
    return azure_client.chat.completions.create(
        model="gpt-35-16k",  # model = "deployment_name".
        messages=messages
    )


def prompt(query):
    return f"""
    You are a university professor. Answer the following question using only the provided context.
    If you can't find the answer, do not pretend you know it, ask for more information.
    Answer in the same language as the question.
    If you used your own knowledge apart from the context provided mention that.

    Question: {query}
    """


def chunk_files(subdirectory_path, subdirectory):
    data = []
    # Process each PDF file in this subdirectory
    for filename in os.listdir(subdirectory_path):
        if filename.endswith('.pdf'):
            file_path = os.path.join(subdirectory_path, filename)
            str_five = ""
            # Open the PDF
            with fitz.open(file_path) as doc:
                for page_num in range(len(doc)):
                    page_text = doc[page_num].get_text()
                    page_text = clean(page_text, bullets=True, extra_whitespace=True)
                    slide_id = filename + str(page_num)
                    if page_num % 5 == 0:
                        if page_num != 0:  # Avoid appending empty content for the first page
                            data.append({
                                "content": str_five,
                                "slide_id": slide_id,
                                "page_interval": str(str(page_num - 5) + "->" + str(page_num)),
                                "lecture_id": subdirectory  # Save the subdirectory name
                            })
                        last_page = doc[page_num - 1].get_text() if page_num > 0 else ""
                        last_page = clean(last_page, bullets=True, extra_whitespace=True)
                        str_five = last_page + page_text
                    else:
                        str_five += "\n\n" + page_text

                # Append the last accumulated text if it's not empty
                if str_five:
                    data.append({
                        "content": str_five,
                        "slide_id": subdirectory_path + str(len(doc)),
                        "page_interval": str(str(len(doc) - 10) + "->" + str(len(doc))),
                        "lecture_id": subdirectory  # Save the subdirectory name
                    })
    return data


class AI:
    def __init__(self):
        api_key_header = {
            "X-Azure-Api-Key": azure_openai_key,  # Replace with your inference API key
        }
        self.client = weaviate.Client(
            url="http://localhost:8080",  # Replace with your endpoint
            additional_headers=api_key_header
        )

    def create_class(self):
        t2v = {
            "model": "ada",
            "modelVersion": "002",
            "type": "text",
            "baseURL": azure_endpoint,
            "resourceName": resource_name,
            "deploymentId": "te-ada-002",
        }
        self.client.schema.delete_class("Lectures")
        if not self.client.schema.exists("Lectures"):
            class_obj = {
                "class": "Lectures",
                "vectorizer": "text2vec-openai",
                "properties": [
                    {
                        "dataType": ["text"],
                        "name": "content",
                        "moduleConfig": {
                            "text2vec-openai": {
                                "vectorizePropertyName": False
                            }
                        },
                    },
                    {
                        "dataType": ["text"],
                        "name": "slide_id",
                        "moduleConfig": {
                            "text2vec-openai": {
                                "vectorizePropertyName": False
                            }
                        },
                    },
                    {
                        "dataType": ["text"],
                        "name": "page_interval",
                        "moduleConfig": {
                            "text2vec-openai": {
                                "vectorizePropertyName": False
                            }
                        },
                    },
                    {
                        "dataType": ["text"],
                        "name": "lecture_id",
                        "moduleConfig": {
                            "text2vec-openai": {
                                "vectorizePropertyName": False
                            }
                        },
                    }
                ],
                "moduleConfig": {
                    "text2vec-openai": t2v,
                    "generative-openai": {
                        "baseURL": azure_endpoint,
                        "resourceName": resource_name,
                        "deploymentId": "gpt-35-16k",
                        "waitForModel": True,
                        "useGPU": False,
                        "useCache": True
                    }
                },
            }
            self.client.schema.create_class(class_obj)
            print("Schema created")

        directory_path = "../../lectures"
        print("Importing data into the batch")
        # Iterate through each subdirectory in the root directory
        for subdirectory in os.listdir(directory_path):
            subdirectory_path = os.path.join(directory_path, subdirectory)
            if os.path.isdir(subdirectory_path):
                self.batch_import(subdirectory_path, subdirectory)
        print("Import Finished")

    def batch_import(self, directory_path, subdirectory):
        data = chunk_files(directory_path, subdirectory)
        # Configure a batch process
        self.client.batch.configure(
            # `batch_size` takes an `int` value to enable auto-batching
            # dynamically update the `batch_size` based on import speed
            dynamic=True,
            timeout_retries=0
        )
        with self.client.batch as batch:
            # Batch import all Questions
            for i, d in enumerate(data):
                embeddings_created = False
                properties = {
                    "content": d["content"],
                    "slide_id": d["slide_id"],
                    "page_interval": d["page_interval"],
                    "lecture_id": d["lecture_id"]
                }

                # Initialize the flag
                embeddings_created = False

                # create embeddings (exponential backoff to avoid RateLimitError)
                for j in range(5):  # max 5 retries
                    # Only attempt to create embeddings if not already created
                    if not embeddings_created:
                        try:
                            batch.add_data_object(
                                properties,
                                "Lectures"
                            )
                            embeddings_created = True  # Set flag to True on success
                            break  # Break the loop as embedding creation was successful
                        except openai.error.RateLimitError:
                            time.sleep(2 ** j)  # wait 2^j seconds before retrying
                            print("Retrying import...")
                    else:
                        break  # Exit loop if embeddings already created

                # Raise an error if embeddings were not created after retries
                if not embeddings_created:
                    raise RuntimeError("Failed to create embeddings.")

    def generate_response(self, user_message, lecture_id):
        completion = query_openai(messages=[{
            "role": "user",
            "content": f"""
            Please give back lecture content that can answer this inquiry: Do not add anything else.
            "{user_message}".\n
            """}])
        generated_lecture = completion.choices[0].message.content

        if lecture_id == "CIT5230000":
            llm = llama_index.llms.AzureOpenAI(model="gpt-35-turbo-16k", deployment_name="gpt-35-16k",
                                               api_key=azure_openai_key,
                                               azure_endpoint=azure_endpoint,
                                               api_version="2023-03-15-preview")
            embed_model = llama_index.embeddings.AzureOpenAIEmbedding(
                model="text-embedding-ada-002",
                deployment_name="te-ada-002",
                api_key=azure_openai_key,
                azure_endpoint=azure_endpoint,
                api_version="2023-03-15-preview"
            )
            service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)

            vector_store = WeaviateVectorStore(
                weaviate_client=self.client, index_name="Lectures", text_key="content"
            )
            retriever = VectorStoreIndex.from_vector_store(vector_store, service_context=service_context).as_retriever(
                similarity_top_k=1
            )
            nodes = retriever.retrieve(generated_lecture)
            pprint_source_node(nodes[0])
            print(nodes[0].node.metadata)

        # add hypothetical document embeddings (hyde)
        if lecture_id != "" and lecture_id is not None:
            response = (
                self.client.query
                .get("Lectures", ["content", "slide_id", "page_interval", ])
                .with_where({
                    "path": ["lecture_id"],
                    "operator": "Equal",
                    "valueText": lecture_id
                })
                .with_near_text({"concepts": generated_lecture})
                # w.with_additional(f'rerank( query: "{user_message}", property: "content"){{score}}')
                .with_generate(grouped_task=prompt(user_message))
                .with_limit(1)
                .do()
            )
            generated_response = response["data"]["Get"]["Lectures"][0]["_additional"]["generate"]["groupedResult"]
        else:
            response = (
                self.client.query
                .get("Lectures", ["content", "slide_id", "page_interval", "lecture_id"])
                # alpha = 0 forces using a pure keyword search method (BM25)
                # alpha = 1 forces using a pure vector search method
                .with_hybrid(query=user_message,
                             alpha=1,
                             fusion_type=HybridFusion.RELATIVE_SCORE
                             )
                # .with_additional(f'rerank( query: "{user_message}", property: "content"){{score}}')
                .with_generate(grouped_task=prompt(user_message))
                .with_limit(3)
                .do()
            )
            generated_response = response["data"]["Get"]["Lectures"][0]["_additional"]["generate"]["groupedResult"]

        slides = response["data"]["Get"]["Lectures"][0]["slide_id"]
        page_interval = response["data"]["Get"]["Lectures"][0]["page_interval"]

        print(json.dumps(response, indent=2))

        return generated_response + f"""\n\nMore relevant information on the slides {slides} "pages":{page_interval} """
[ "llama_index.llms.AzureOpenAI", "llama_index.embeddings.AzureOpenAIEmbedding", "llama_index.ServiceContext.from_defaults", "llama_index.VectorStoreIndex.from_vector_store", "llama_index.response.pprint_utils.pprint_source_node", "llama_index.vector_stores.WeaviateVectorStore" ]
[((707, 773), 'langchain.document_loaders.GutenbergLoader', 'GutenbergLoader', (['"""https://www.gutenberg.org/files/2591/2591-0.txt"""'], {}), "('https://www.gutenberg.org/files/2591/2591-0.txt')\n", (722, 773), False, 'from langchain.document_loaders import GutenbergLoader\n'), ((817, 892), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(0)', 'length_function': 'len'}), '(chunk_size=500, chunk_overlap=0, length_function=len)\n', (838, 892), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((994, 1102), 'weaviate.Client', 'weaviate.Client', ([], {'url': 'WEAVIATE_URL', 'additional_headers': "{'X-OpenAI-Api-Key': os.environ['OPENAI_API_KEY']}"}), "(url=WEAVIATE_URL, additional_headers={'X-OpenAI-Api-Key':\n os.environ['OPENAI_API_KEY']})\n", (1009, 1102), False, 'import weaviate\n'), ((1464, 1504), 'langchain.vectorstores.Weaviate', 'Weaviate', (['client', '"""Paragraph"""', '"""content"""'], {}), "(client, 'Paragraph', 'content')\n", (1472, 1504), False, 'from langchain.vectorstores import Weaviate\n'), ((1827, 1856), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_KEY"""'], {}), "('AZURE_OPENAI_KEY')\n", (1836, 1856), False, 'import os\n'), ((1874, 1908), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_ENDPOINT"""'], {}), "('AZURE_OPENAI_ENDPOINT')\n", (1883, 1908), False, 'import os\n'), ((1925, 1951), 'os.getenv', 'os.getenv', (['"""RESOURCE_NAME"""'], {}), "('RESOURCE_NAME')\n", (1934, 1951), False, 'import os\n'), ((2862, 2891), 'os.listdir', 'os.listdir', (['subdirectory_path'], {}), '(subdirectory_path)\n', (2872, 2891), False, 'import os\n'), ((2016, 2050), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_ENDPOINT"""'], {}), "('AZURE_OPENAI_ENDPOINT')\n", (2025, 2050), False, 'import os\n'), ((2064, 2093), 'os.getenv', 'os.getenv', (['"""AZURE_OPENAI_KEY"""'], {}), "('AZURE_OPENAI_KEY')\n", (2073, 2093), False, 'import os\n'), ((4802, 4881), 'weaviate.Client', 'weaviate.Client', ([], {'url': '"""http://localhost:8080"""', 'additional_headers': 'api_key_header'}), "(url='http://localhost:8080', additional_headers=api_key_header)\n", (4817, 4881), False, 'import weaviate\n'), ((2955, 2996), 'os.path.join', 'os.path.join', (['subdirectory_path', 'filename'], {}), '(subdirectory_path, filename)\n', (2967, 2996), False, 'import os\n'), ((7628, 7654), 'os.listdir', 'os.listdir', (['directory_path'], {}), '(directory_path)\n', (7638, 7654), False, 'import os\n'), ((10504, 10688), 'llama_index.llms.AzureOpenAI', 'llama_index.llms.AzureOpenAI', ([], {'model': '"""gpt-35-turbo-16k"""', 'deployment_name': '"""gpt-35-16k"""', 'api_key': 'azure_openai_key', 'azure_endpoint': 'azure_endpoint', 'api_version': '"""2023-03-15-preview"""'}), "(model='gpt-35-turbo-16k', deployment_name=\n 'gpt-35-16k', api_key=azure_openai_key, azure_endpoint=azure_endpoint,\n api_version='2023-03-15-preview')\n", (10532, 10688), False, 'import llama_index\n'), ((10800, 11005), 'llama_index.embeddings.AzureOpenAIEmbedding', 'llama_index.embeddings.AzureOpenAIEmbedding', ([], {'model': '"""text-embedding-ada-002"""', 'deployment_name': '"""te-ada-002"""', 'api_key': 'azure_openai_key', 'azure_endpoint': 'azure_endpoint', 'api_version': '"""2023-03-15-preview"""'}), "(model='text-embedding-ada-002',\n deployment_name='te-ada-002', api_key=azure_openai_key, azure_endpoint=\n azure_endpoint, api_version='2023-03-15-preview')\n", (10843, 11005), False, 'import llama_index\n'), ((11121, 11183), 'llama_index.ServiceContext.from_defaults', 
'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (11149, 11183), False, 'from llama_index import VectorStoreIndex, ServiceContext, set_global_service_context\n'), ((11213, 11308), 'llama_index.vector_stores.WeaviateVectorStore', 'WeaviateVectorStore', ([], {'weaviate_client': 'self.client', 'index_name': '"""Lectures"""', 'text_key': '"""content"""'}), "(weaviate_client=self.client, index_name='Lectures',\n text_key='content')\n", (11232, 11308), False, 'from llama_index.vector_stores import WeaviateVectorStore\n'), ((11574, 11602), 'llama_index.response.pprint_utils.pprint_source_node', 'pprint_source_node', (['nodes[0]'], {}), '(nodes[0])\n', (11592, 11602), False, 'from llama_index.response.pprint_utils import pprint_source_node\n'), ((13511, 13541), 'json.dumps', 'json.dumps', (['response'], {'indent': '(2)'}), '(response, indent=2)\n', (13521, 13541), False, 'import json\n'), ((3067, 3087), 'fitz.open', 'fitz.open', (['file_path'], {}), '(file_path)\n', (3076, 3087), False, 'import fitz\n'), ((7692, 7734), 'os.path.join', 'os.path.join', (['directory_path', 'subdirectory'], {}), '(directory_path, subdirectory)\n', (7704, 7734), False, 'import os\n'), ((7754, 7786), 'os.path.isdir', 'os.path.isdir', (['subdirectory_path'], {}), '(subdirectory_path)\n', (7767, 7786), False, 'import os\n'), ((3234, 3287), 'unstructured.cleaners.core.clean', 'clean', (['page_text'], {'bullets': '(True)', 'extra_whitespace': '(True)'}), '(page_text, bullets=True, extra_whitespace=True)\n', (3239, 3287), False, 'from unstructured.cleaners.core import clean\n'), ((11359, 11445), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {'service_context': 'service_context'}), '(vector_store, service_context=\n service_context)\n', (11393, 11445), False, 'from llama_index import VectorStoreIndex, ServiceContext, set_global_service_context\n'), ((3970, 4023), 'unstructured.cleaners.core.clean', 'clean', (['last_page'], {'bullets': '(True)', 'extra_whitespace': '(True)'}), '(last_page, bullets=True, extra_whitespace=True)\n', (3975, 4023), False, 'from unstructured.cleaners.core import clean\n'), ((9510, 9528), 'time.sleep', 'time.sleep', (['(2 ** j)'], {}), '(2 ** j)\n', (9520, 9528), False, 'import time\n')]
"""Elasticsearch vector store.""" import asyncio import uuid from logging import getLogger from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast import nest_asyncio import numpy as np from llama_index.schema import BaseNode, MetadataMode, TextNode from llama_index.vector_stores.types import ( MetadataFilters, VectorStore, VectorStoreQuery, VectorStoreQueryMode, VectorStoreQueryResult, ) from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict logger = getLogger(__name__) DISTANCE_STRATEGIES = Literal[ "COSINE", "DOT_PRODUCT", "EUCLIDEAN_DISTANCE", ] def _get_elasticsearch_client( *, es_url: Optional[str] = None, cloud_id: Optional[str] = None, api_key: Optional[str] = None, username: Optional[str] = None, password: Optional[str] = None, ) -> Any: """Get AsyncElasticsearch client. Args: es_url: Elasticsearch URL. cloud_id: Elasticsearch cloud ID. api_key: Elasticsearch API key. username: Elasticsearch username. password: Elasticsearch password. Returns: AsyncElasticsearch client. Raises: ConnectionError: If Elasticsearch client cannot connect to Elasticsearch. """ try: import elasticsearch except ImportError: raise ImportError( "Could not import elasticsearch python package. " "Please install it with `pip install elasticsearch`." ) if es_url and cloud_id: raise ValueError( "Both es_url and cloud_id are defined. Please provide only one." ) if es_url and cloud_id: raise ValueError( "Both es_url and cloud_id are defined. Please provide only one." ) connection_params: Dict[str, Any] = {} if es_url: connection_params["hosts"] = [es_url] elif cloud_id: connection_params["cloud_id"] = cloud_id else: raise ValueError("Please provide either elasticsearch_url or cloud_id.") if api_key: connection_params["api_key"] = api_key elif username and password: connection_params["basic_auth"] = (username, password) sync_es_client = elasticsearch.Elasticsearch( **connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()} ) async_es_client = elasticsearch.AsyncElasticsearch(**connection_params) try: sync_es_client.info() # so don't have to 'await' to just get info except Exception as e: logger.error(f"Error connecting to Elasticsearch: {e}") raise return async_es_client def _to_elasticsearch_filter(standard_filters: MetadataFilters) -> Dict[str, Any]: """Convert standard filters to Elasticsearch filter. Args: standard_filters: Standard Llama-index filters. Returns: Elasticsearch filter. """ if len(standard_filters.filters) == 1: filter = standard_filters.filters[0] return { "term": { f"metadata.{filter.key}.keyword": { "value": filter.value, } } } else: operands = [] for filter in standard_filters.filters: operands.append( { "term": { f"metadata.{filter.key}.keyword": { "value": filter.value, } } } ) return {"bool": {"must": operands}} def _to_llama_similarities(scores: List[float]) -> List[float]: if scores is None or len(scores) == 0: return [] scores_to_norm: np.ndarray = np.array(scores) return np.exp(scores_to_norm - np.max(scores_to_norm)).tolist() class ElasticsearchStore(VectorStore): """Elasticsearch vector store. Args: index_name: Name of the Elasticsearch index. es_client: Optional. Pre-existing AsyncElasticsearch client. es_url: Optional. Elasticsearch URL. es_cloud_id: Optional. Elasticsearch cloud ID. es_api_key: Optional. Elasticsearch API key. es_user: Optional. Elasticsearch username. es_password: Optional. Elasticsearch password. text_field: Optional. Name of the Elasticsearch field that stores the text. 
vector_field: Optional. Name of the Elasticsearch field that stores the embedding. batch_size: Optional. Batch size for bulk indexing. Defaults to 200. distance_strategy: Optional. Distance strategy to use for similarity search. Defaults to "COSINE". Raises: ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch. ValueError: If neither es_client nor es_url nor es_cloud_id is provided. """ stores_text: bool = True def __init__( self, index_name: str, es_client: Optional[Any] = None, es_url: Optional[str] = None, es_cloud_id: Optional[str] = None, es_api_key: Optional[str] = None, es_user: Optional[str] = None, es_password: Optional[str] = None, text_field: str = "content", vector_field: str = "embedding", batch_size: int = 200, distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE", ) -> None: nest_asyncio.apply() self.index_name = index_name self.text_field = text_field self.vector_field = vector_field self.batch_size = batch_size self.distance_strategy = distance_strategy if es_client is not None: self._client = es_client.options( headers={"user-agent": self.get_user_agent()} ) elif es_url is not None or es_cloud_id is not None: self._client = _get_elasticsearch_client( es_url=es_url, username=es_user, password=es_password, cloud_id=es_cloud_id, api_key=es_api_key, ) else: raise ValueError( """Either provide a pre-existing AsyncElasticsearch or valid \ credentials for creating a new connection.""" ) @property def client(self) -> Any: """Get async elasticsearch client.""" return self._client @staticmethod def get_user_agent() -> str: """Get user agent for elasticsearch client.""" import llama_index return f"llama_index-py-vs/{llama_index.__version__}" async def _create_index_if_not_exists( self, index_name: str, dims_length: Optional[int] = None ) -> None: """Create the AsyncElasticsearch index if it doesn't already exist. Args: index_name: Name of the AsyncElasticsearch index to create. dims_length: Length of the embedding vectors. """ if await self.client.indices.exists(index=index_name): logger.debug(f"Index {index_name} already exists. Skipping creation.") else: if dims_length is None: raise ValueError( "Cannot create index without specifying dims_length " "when the index doesn't already exist. We infer " "dims_length from the first embedding. Check that " "you have provided an embedding function." ) if self.distance_strategy == "COSINE": similarityAlgo = "cosine" elif self.distance_strategy == "EUCLIDEAN_DISTANCE": similarityAlgo = "l2_norm" elif self.distance_strategy == "DOT_PRODUCT": similarityAlgo = "dot_product" else: raise ValueError(f"Similarity {self.distance_strategy} not supported.") index_settings = { "mappings": { "properties": { self.vector_field: { "type": "dense_vector", "dims": dims_length, "index": True, "similarity": similarityAlgo, }, self.text_field: {"type": "text"}, "metadata": { "properties": { "document_id": {"type": "keyword"}, "doc_id": {"type": "keyword"}, "ref_doc_id": {"type": "keyword"}, } }, } } } logger.debug( f"Creating index {index_name} with mappings {index_settings['mappings']}" ) await self.client.indices.create(index=index_name, **index_settings) def add( self, nodes: List[BaseNode], *, create_index_if_not_exists: bool = True, **add_kwargs: Any, ) -> List[str]: """Add nodes to Elasticsearch index. Args: nodes: List of nodes with embeddings. create_index_if_not_exists: Optional. Whether to create the Elasticsearch index if it doesn't already exist. Defaults to True. Returns: List of node IDs that were added to the index. 
Raises: ImportError: If elasticsearch['async'] python package is not installed. BulkIndexError: If AsyncElasticsearch async_bulk indexing fails. """ return asyncio.get_event_loop().run_until_complete( self.async_add(nodes, create_index_if_not_exists=create_index_if_not_exists) ) async def async_add( self, nodes: List[BaseNode], *, create_index_if_not_exists: bool = True, **add_kwargs: Any, ) -> List[str]: """Asynchronous method to add nodes to Elasticsearch index. Args: nodes: List of nodes with embeddings. create_index_if_not_exists: Optional. Whether to create the AsyncElasticsearch index if it doesn't already exist. Defaults to True. Returns: List of node IDs that were added to the index. Raises: ImportError: If elasticsearch python package is not installed. BulkIndexError: If AsyncElasticsearch async_bulk indexing fails. """ try: from elasticsearch.helpers import BulkIndexError, async_bulk except ImportError: raise ImportError( "Could not import elasticsearch[async] python package. " "Please install it with `pip install 'elasticsearch[async]'`." ) if len(nodes) == 0: return [] if create_index_if_not_exists: dims_length = len(nodes[0].get_embedding()) await self._create_index_if_not_exists( index_name=self.index_name, dims_length=dims_length ) embeddings: List[List[float]] = [] texts: List[str] = [] metadatas: List[dict] = [] ids: List[str] = [] for node in nodes: ids.append(node.node_id) embeddings.append(node.get_embedding()) texts.append(node.get_content(metadata_mode=MetadataMode.NONE)) metadatas.append(node_to_metadata_dict(node, remove_text=True)) requests = [] return_ids = [] for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} _id = ids[i] if ids else str(uuid.uuid4()) request = { "_op_type": "index", "_index": self.index_name, self.vector_field: embeddings[i], self.text_field: text, "metadata": metadata, "_id": _id, } requests.append(request) return_ids.append(_id) await async_bulk( self.client, requests, chunk_size=self.batch_size, refresh=True ) try: success, failed = await async_bulk( self.client, requests, stats_only=True, refresh=True ) logger.debug(f"Added {success} and failed to add {failed} texts to index") logger.debug(f"added texts {ids} to index") return return_ids except BulkIndexError as e: logger.error(f"Error adding texts: {e}") firstError = e.errors[0].get("index", {}).get("error", {}) logger.error(f"First error reason: {firstError.get('reason')}") raise def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """Delete node from Elasticsearch index. Args: ref_doc_id: ID of the node to delete. delete_kwargs: Optional. Additional arguments to pass to Elasticsearch delete_by_query. Raises: Exception: If Elasticsearch delete_by_query fails. """ return asyncio.get_event_loop().run_until_complete( self.adelete(ref_doc_id, **delete_kwargs) ) async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None: """Async delete node from Elasticsearch index. Args: ref_doc_id: ID of the node to delete. delete_kwargs: Optional. Additional arguments to pass to AsyncElasticsearch delete_by_query. Raises: Exception: If AsyncElasticsearch delete_by_query fails. 
""" try: async with self.client as client: res = await client.delete_by_query( index=self.index_name, query={"term": {"metadata.ref_doc_id": ref_doc_id}}, refresh=True, **delete_kwargs, ) if res["deleted"] == 0: logger.warning(f"Could not find text {ref_doc_id} to delete") else: logger.debug(f"Deleted text {ref_doc_id} from index") except Exception: logger.error(f"Error deleting text: {ref_doc_id}") raise def query( self, query: VectorStoreQuery, custom_query: Optional[ Callable[[Dict, Union[VectorStoreQuery, None]], Dict] ] = None, es_filter: Optional[List[Dict]] = None, **kwargs: Any, ) -> VectorStoreQueryResult: """Query index for top k most similar nodes. Args: query_embedding (List[float]): query embedding custom_query: Optional. custom query function that takes in the es query body and returns a modified query body. This can be used to add additional query parameters to the Elasticsearch query. es_filter: Optional. Elasticsearch filter to apply to the query. If filter is provided in the query, this filter will be ignored. Returns: VectorStoreQueryResult: Result of the query. Raises: Exception: If Elasticsearch query fails. """ return asyncio.get_event_loop().run_until_complete( self.aquery(query, custom_query, es_filter, **kwargs) ) async def aquery( self, query: VectorStoreQuery, custom_query: Optional[ Callable[[Dict, Union[VectorStoreQuery, None]], Dict] ] = None, es_filter: Optional[List[Dict]] = None, **kwargs: Any, ) -> VectorStoreQueryResult: """Asynchronous query index for top k most similar nodes. Args: query_embedding (VectorStoreQuery): query embedding custom_query: Optional. custom query function that takes in the es query body and returns a modified query body. This can be used to add additional query parameters to the AsyncElasticsearch query. es_filter: Optional. AsyncElasticsearch filter to apply to the query. If filter is provided in the query, this filter will be ignored. Returns: VectorStoreQueryResult: Result of the query. Raises: Exception: If AsyncElasticsearch query fails. 
""" query_embedding = cast(List[float], query.query_embedding) es_query = {} if query.filters is not None and len(query.filters.filters) > 0: filter = [_to_elasticsearch_filter(query.filters)] else: filter = es_filter or [] if query.mode in ( VectorStoreQueryMode.DEFAULT, VectorStoreQueryMode.HYBRID, ): es_query["knn"] = { "filter": filter, "field": self.vector_field, "query_vector": query_embedding, "k": query.similarity_top_k, "num_candidates": query.similarity_top_k * 10, } if query.mode in ( VectorStoreQueryMode.TEXT_SEARCH, VectorStoreQueryMode.HYBRID, ): es_query["query"] = { "bool": { "must": {"match": {self.text_field: {"query": query.query_str}}}, "filter": filter, } } if query.mode == VectorStoreQueryMode.HYBRID: es_query["rank"] = {"rrf": {}} if custom_query is not None: es_query = custom_query(es_query, query) logger.debug(f"Calling custom_query, Query body now: {es_query}") async with self.client as client: response = await client.search( index=self.index_name, **es_query, size=query.similarity_top_k, _source={"excludes": [self.vector_field]}, ) top_k_nodes = [] top_k_ids = [] top_k_scores = [] hits = response["hits"]["hits"] for hit in hits: source = hit["_source"] metadata = source.get("metadata", None) text = source.get(self.text_field, None) node_id = hit["_id"] try: node = metadata_dict_to_node(metadata) node.text = text except Exception: # Legacy support for old metadata format logger.warning( f"Could not parse metadata from hit {hit['_source']['metadata']}" ) node_info = source.get("node_info") relationships = source.get("relationships") start_char_idx = None end_char_idx = None if isinstance(node_info, dict): start_char_idx = node_info.get("start", None) end_char_idx = node_info.get("end", None) node = TextNode( text=text, metadata=metadata, id_=node_id, start_char_idx=start_char_idx, end_char_idx=end_char_idx, relationships=relationships, ) top_k_nodes.append(node) top_k_ids.append(node_id) top_k_scores.append(hit.get("_rank", hit["_score"])) if query.mode == VectorStoreQueryMode.HYBRID: total_rank = sum(top_k_scores) top_k_scores = [total_rank - rank / total_rank for rank in top_k_scores] return VectorStoreQueryResult( nodes=top_k_nodes, ids=top_k_ids, similarities=_to_llama_similarities(top_k_scores), )
[ "llama_index.schema.TextNode", "llama_index.vector_stores.utils.metadata_dict_to_node", "llama_index.vector_stores.utils.node_to_metadata_dict" ]
[((534, 553), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (543, 553), False, 'from logging import getLogger\n'), ((2379, 2432), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2411, 2432), False, 'import elasticsearch\n'), ((3728, 3744), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3736, 3744), True, 'import numpy as np\n'), ((5409, 5429), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (5427, 5429), False, 'import nest_asyncio\n'), ((16807, 16847), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (16811, 16847), False, 'from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast\n'), ((12293, 12368), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'chunk_size': 'self.batch_size', 'refresh': '(True)'}), '(self.client, requests, chunk_size=self.batch_size, refresh=True)\n', (12303, 12368), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((9758, 9782), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (9780, 9782), False, 'import asyncio\n'), ((11685, 11730), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)'}), '(node, remove_text=True)\n', (11706, 11730), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((12440, 12504), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'stats_only': '(True)', 'refresh': '(True)'}), '(self.client, requests, stats_only=True, refresh=True)\n', (12450, 12504), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((13378, 13402), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (13400, 13402), False, 'import asyncio\n'), ((15585, 15609), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (15607, 15609), False, 'import asyncio\n'), ((18698, 18729), 'llama_index.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['metadata'], {}), '(metadata)\n', (18719, 18729), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((3780, 3802), 'numpy.max', 'np.max', (['scores_to_norm'], {}), '(scores_to_norm)\n', (3786, 3802), True, 'import numpy as np\n'), ((11919, 11931), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (11929, 11931), False, 'import uuid\n'), ((19372, 19515), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'text', 'metadata': 'metadata', 'id_': 'node_id', 'start_char_idx': 'start_char_idx', 'end_char_idx': 'end_char_idx', 'relationships': 'relationships'}), '(text=text, metadata=metadata, id_=node_id, start_char_idx=\n start_char_idx, end_char_idx=end_char_idx, relationships=relationships)\n', (19380, 19515), False, 'from llama_index.schema import BaseNode, MetadataMode, TextNode\n')]