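# Semantic search over a precomputed embeddings dataset: embed a query with
# nomic-embed-text-v1.5 and look up its nearest neighbours in a FAISS index.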
from datasets import Dataset
from transformers import AutoTokenizer, AutoModel
import torch
import pandas as pd

model_ckpt = "nomic-ai/nomic-embed-text-v1.5"


# trust_remote_code is required: the Nomic checkpoint ships custom modeling code.
tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
model = AutoModel.from_pretrained(model_ckpt, trust_remote_code=True)

device = torch.device("cpu")
model.to(device)
model.eval()  # inference only; disable dropout

def cls_pooling(model_output):
    # Use the hidden state of the first ([CLS]) token as the sentence embedding.
    return model_output.last_hidden_state[:, 0]

def get_embeddings(text_list):
    encoded_input = tokenizer(
        text_list, padding=True, truncation=True, return_tensors="pt"
    )
    encoded_input = {k: v.to(device) for k, v in encoded_input.items()}
    with torch.no_grad():  # no gradients needed at inference time
        model_output = model(**encoded_input)
    return cls_pooling(model_output)


# Reload the saved dataset and attach the FAISS index built over its
# "embeddings" column.
embeddings_dataset = Dataset.load_from_disk("dataset/embeddings")
embeddings_dataset.load_faiss_index("embeddings", "index/embeddings")

# Note: nomic-embed-text-v1.5 was trained with task prefixes, so queries are
# usually embedded as "search_query: <text>" for best retrieval quality.
question = "Download license key"

# Embed the query and convert it to a NumPy array for the FAISS lookup;
# under no_grad the tensor needs no detach().
question_embedding = get_embeddings([question]).cpu().numpy()

# Retrieve the ten nearest neighbours of the query from the FAISS index.
scores, samples = embeddings_dataset.get_nearest_examples(
    "embeddings", question_embedding, k=10
)

samples_df = pd.DataFrame.from_dict(samples)
samples_df["scores"] = scores
# With the default FAISS index the scores are L2 distances, so lower means
# a closer match; sort ascending to put the best hits first.
samples_df.sort_values("scores", ascending=True, inplace=True)

# Print each retrieved example, best match first.
for _, row in samples_df.iterrows():
    print(f"COMMENT: {row.text}")
    print(f"SCORE: {row.scores}")
    print(f"PROMPT: {row.prompt}")
    print("=" * 50)
    print()