from datasets import Dataset
from transformers import AutoTokenizer, AutoModel
import torch
import pandas as pd

model_ckpt = "nomic-ai/nomic-embed-text-v1.5"
tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
model = AutoModel.from_pretrained(model_ckpt, trust_remote_code=True)

device = torch.device("cpu")
model.to(device)


def cls_pooling(model_output):
    # Use the hidden state of the [CLS] token as the sentence embedding.
    # This must match the pooling used when the index was originally built.
    return model_output.last_hidden_state[:, 0]


def get_embeddings(text_list):
    encoded_input = tokenizer(
        text_list, padding=True, truncation=True, return_tensors="pt"
    )
    encoded_input = {k: v.to(device) for k, v in encoded_input.items()}
    # Inference only: no gradients needed.
    with torch.no_grad():
        model_output = model(**encoded_input)
    return cls_pooling(model_output)


# Load the previously embedded dataset and its FAISS index from disk.
embeddings_dataset = Dataset.load_from_disk("dataset/embeddings")
embeddings_dataset.load_faiss_index("embeddings", "index/embeddings")

question = "Download license key"
question_embedding = get_embeddings([question]).cpu().detach().numpy()

scores, samples = embeddings_dataset.get_nearest_examples(
    "embeddings", question_embedding, k=10
)

samples_df = pd.DataFrame.from_dict(samples)
samples_df["scores"] = scores
# With the default flat L2 index from `datasets`, lower scores mean
# closer matches, so sort ascending to put the best results first.
samples_df.sort_values("scores", ascending=True, inplace=True)

for _, row in samples_df.iterrows():
    print(f"COMMENT: {row.text}")
    print(f"SCORE: {row.scores}")
    print(f"PROMPT: {row.prompt}")
    print("=" * 50)
    print()
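
# --- Hedged sketch: how "dataset/embeddings" and "index/embeddings" could be built. ---
# The script above assumes both artifacts already exist on disk. A minimal,
# one-time build step might look like the commented code below. The source
# file name "corpus.csv" and its "text"/"prompt" columns are hypothetical
# assumptions (chosen only because the search loop reads those fields);
# substitute whatever your raw data actually looks like. Note that
# `save_to_disk` must run before `add_faiss_index`, since datasets with an
# attached index cannot be serialized.
#
# corpus_dataset = Dataset.from_pandas(pd.read_csv("corpus.csv"))
# corpus_dataset = corpus_dataset.map(
#     lambda batch: {
#         "embeddings": get_embeddings(batch["text"]).cpu().detach().numpy()
#     },
#     batched=True,
#     batch_size=16,
# )
# corpus_dataset.save_to_disk("dataset/embeddings")
# corpus_dataset.add_faiss_index(column="embeddings")
# corpus_dataset.save_faiss_index("embeddings", "index/embeddings")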