spacemanidol committed on
Update README.md

README.md CHANGED
```diff
@@ -7699,8 +7699,8 @@ for query, query_scores in zip(queries, scores):
 
 #### Variation: Truncated Embeddings ####
 query_embeddings_256 = normalize(torch.from_numpy(query_embeddings)[:, :256])
-
-scores_256 = query_embeddings_256 @
+document_embeddings_256 = normalize(torch.from_numpy(document_embeddings)[:, :256])
+scores_256 = query_embeddings_256 @ document_embeddings_256.T
 
 # Pretty-print the results.
 for query, query_scores in zip(queries, scores_256):
```
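The corrected lines above truncate both query and document embeddings to their first 256 dimensions and re-normalize them before scoring. Below is a minimal, self-contained sketch of that same pattern, using random NumPy arrays as stand-ins for the embeddings the README computes earlier, and assuming `normalize` is `torch.nn.functional.normalize`:

```python
# A minimal sketch of the truncated-embedding scoring pattern in the hunk
# above. Random NumPy arrays stand in for the real model outputs.
import numpy as np
import torch
from torch.nn.functional import normalize

# Stand-ins for the embeddings computed earlier in the README's example.
query_embeddings = np.random.randn(2, 768).astype(np.float32)
document_embeddings = np.random.randn(4, 768).astype(np.float32)

# Keep only the first 256 dimensions, then re-normalize so the dot product
# below is again a cosine similarity.
query_embeddings_256 = normalize(torch.from_numpy(query_embeddings)[:, :256])
document_embeddings_256 = normalize(torch.from_numpy(document_embeddings)[:, :256])

# Query-document similarity matrix: one row per query, one column per document.
scores_256 = query_embeddings_256 @ document_embeddings_256.T
print(scores_256.shape)  # torch.Size([2, 4])
```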
```diff
@@ -7752,11 +7752,11 @@ document_tokens = tokenizer(documents, padding=True, truncation=True, return_te
 # Use the model to generate text embeddings.
 with torch.inference_mode():
     query_embeddings = model(**query_tokens)[0][:, 0]
-
+    document_embeddings = model(**document_tokens)[0][:, 0]
 
 # Remember to normalize embeddings.
 query_embeddings = normalize(query_embeddings)
-
+document_embeddings = normalize(document_embeddings)
 
 # Scores via dotproduct.
 scores = query_embeddings @ document_embeddings.T
```
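This hunk fills in the missing document-side lines of the Transformers example: both queries and documents are encoded by taking the CLS vector (`[0][:, 0]`) of the last hidden state, normalized, and scored with a dot product. Below is a hedged, self-contained sketch of that flow; the checkpoint name, queries, and documents are placeholders for illustration and are not taken from this README:

```python
# Sketch of the CLS-pooling retrieval flow this hunk completes.
# The checkpoint and the example texts are placeholders, not the README's own.
import torch
from torch.nn.functional import normalize
from transformers import AutoModel, AutoTokenizer

model_name = "BAAI/bge-small-en-v1.5"  # placeholder CLS-pooled embedding model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)
model.eval()

# Illustrative inputs; the README defines its own queries and documents earlier.
queries = ["what is a vector embedding?"]
documents = [
    "An embedding maps a piece of text to a dense numeric vector.",
    "Paris is the capital of France.",
]

query_tokens = tokenizer(queries, padding=True, truncation=True, return_tensors="pt")
document_tokens = tokenizer(documents, padding=True, truncation=True, return_tensors="pt")

# Use the model to generate text embeddings (CLS pooling via [0][:, 0]).
with torch.inference_mode():
    query_embeddings = model(**query_tokens)[0][:, 0]
    document_embeddings = model(**document_tokens)[0][:, 0]

# Normalize so the dot product below is a cosine similarity.
query_embeddings = normalize(query_embeddings)
document_embeddings = normalize(document_embeddings)

# Scores: one row per query, one column per document.
scores = query_embeddings @ document_embeddings.T

# Pretty-print the results.
for query, query_scores in zip(queries, scores):
    for document, score in zip(documents, query_scores):
        print(f"{score.item():.4f}\t{query}\t{document}")
```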
```diff
@@ -7782,8 +7782,8 @@ for query, query_scores in zip(queries, scores):
 
 #### Variation: Truncated Embeddings ####
 query_embeddings_256 = normalize(query_embeddings[:, :256])
-
-scores_256 = query_embeddings_256 @
+document_embeddings_256 = normalize(document_embeddings[:, :256])
+scores_256 = query_embeddings_256 @ document_embeddings_256.T
 
 # Pretty-print the results.
 for query, query_scores in zip(queries, scores_256):
```
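One detail behind both truncated-embedding hunks: slicing a unit-norm embedding down to its first 256 dimensions generally leaves a vector whose norm is below 1, so the `normalize` call is what keeps the truncated dot product a true cosine similarity. A tiny check, again assuming `normalize` is `torch.nn.functional.normalize`:

```python
# Slicing a unit-norm vector breaks the unit norm until it is re-normalized.
import torch
from torch.nn.functional import normalize

full = normalize(torch.randn(1, 768))      # unit-norm 768-dim embedding
truncated = full[:, :256]                  # slicing shrinks the norm
print(truncated.norm().item())             # typically around 0.58, not 1.0
print(normalize(truncated).norm().item())  # 1.0 again after re-normalizing
```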