import chromadb
from sentence_transformers import CrossEncoder, SentenceTransformer

def chroma_client_setup():
    # Create an in-memory Chroma client and a collection that uses cosine distance.
    chroma_client = chromadb.Client()
    collection = chroma_client.create_collection(
        name="food_collection",
        metadata={"hnsw:space": "cosine"}  # l2 is the default
    )
    return collection

def embedding_function(items_to_embed: list[str]):
    sentence_model = SentenceTransformer(
        "mixedbread-ai/mxbai-embed-large-v1"
    )
    embedded_items = sentence_model.encode(
        items_to_embed,
        show_progress_bar=True
    )
    return embedded_items

def chroma_upserting(collection, embeddings: list[list[float]], payload: list[dict]):
    # Store the documents alongside their embeddings, keeping the payload dicts as metadata.
    collection.add(
        documents=[item['doc'] for item in payload],
        embeddings=embeddings,
        metadatas=payload,
        ids=[f"id{item}" for item in range(len(embeddings))]
    )

def search_chroma(collection, query: str):
    results = collection.query(
        query_embeddings=embedding_function([query]),
        n_results=5
    )
    return results

def reranking_results(query: str, top_k_results: list[str]):
    # Load the cross-encoder reranker (here the xsmall variant) and rerank the retrieved documents.
    rerank_model = CrossEncoder("mixedbread-ai/mxbai-rerank-xsmall-v1")
    reranked_results = rerank_model.rank(query, top_k_results, return_documents=True)
    return reranked_results
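
# Example end-to-end usage: build the collection, embed and upsert a few documents,
# then retrieve and rerank for a query. This is a minimal sketch added for
# illustration; the sample documents and the query below are assumptions, not part
# of the original file.
if __name__ == "__main__":
    docs = [
        {"doc": "A hearty lentil soup with cumin and garlic."},
        {"doc": "Chocolate lava cake with vanilla ice cream."},
        {"doc": "Grilled salmon in a lemon butter sauce."},
        {"doc": "Crispy falafel wraps with tahini dressing."},
        {"doc": "Spicy chicken ramen with a soft-boiled egg."},
        {"doc": "Margherita pizza with fresh basil and mozzarella."},
    ]

    collection = chroma_client_setup()
    embeddings = embedding_function([item["doc"] for item in docs])
    chroma_upserting(collection, embeddings.tolist(), docs)

    query = "What is a good fish dish?"
    hits = search_chroma(collection, query)
    # Chroma nests results per query, so take the first (and only) document list.
    reranked = reranking_results(query, hits["documents"][0])
    print(reranked[0])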