# rag_chat_with_analytics/llamaindex.py
# %pip install llama-index llama-index-vector-stores-lancedb
# %pip install lancedb==0.6.13  # only required if the cell above installs an older version of lancedb (the PyPI package may not be released yet)
# %pip install llama-index-embeddings-fastembed
# %pip install llama-index-readers-file
from llama_index.core import Settings, SimpleDirectoryReader, StorageContext, VectorStoreIndex
from llama_index.vector_stores.lancedb import LanceDBVectorStore
from llama_index.embeddings.fastembed import FastEmbedEmbedding
# Configure global settings
Settings.embed_model = FastEmbedEmbedding(model_name="BAAI/bge-small-en-v1.5")
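# Optional sanity check (not in the original script; a minimal sketch assuming the
# FastEmbed model loads locally): bge-small-en-v1.5 returns 384-dimensional vectors.
sample_embedding = Settings.embed_model.get_text_embedding("hello world")
print(f"Embedding dimension: {len(sample_embedding)}")  # expected: 384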
# Set up the LanceDB vector store (persisted on disk under ./lancedb)
vector_store = LanceDBVectorStore(
    uri="./lancedb",
    mode="overwrite",
    query_type="vector",
)
# Load your documents (raw string so backslashes in the Windows path are not treated as escapes)
documents = SimpleDirectoryReader(r"D:\DEV\LIZMOTORS\LANGCHAIN\digiyatrav2\chatbot\data").load_data()
# Create the index, writing embeddings into the LanceDB store via a StorageContext
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context,
)
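# Optional (not in the original script): on later runs you can skip re-indexing and
# attach to the already-persisted LanceDB data instead. A minimal sketch, assuming the
# same embedding settings and the store created above:
# existing_store = LanceDBVectorStore(uri="./lancedb", query_type="vector")
# index = VectorStoreIndex.from_vector_store(existing_store)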
# Create a retriever
retriever = index.as_retriever()
response = retriever.retrieve("Your query here")
print(response)
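# Optional (not in the original script): retrieve() returns a list of NodeWithScore
# objects, so printing each node's score and text is usually more readable than the raw list.
for node_with_score in response:
    print(node_with_score.score)
    print(node_with_score.node.get_content()[:200])  # first 200 characters of the chunk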