|
--- |
|
license: mit |
|
language: |
|
- en |
|
tags: |
|
- sparse sparsity quantized onnx embeddings int8 |
|
- mteb |
|
model-index: |
|
- name: bge-large-en-v1.5-sparse |
|
results: |
|
- task: |
|
type: STS |
|
dataset: |
|
type: mteb/biosses-sts |
|
name: MTEB BIOSSES |
|
config: default |
|
split: test |
|
revision: d3fb88f8f02e40887cd149695127462bbcf29b4a |
|
metrics: |
|
- type: cos_sim_pearson |
|
value: 87.73305831153709 |
|
- type: cos_sim_spearman |
|
value: 85.64351771070989 |
|
- type: euclidean_pearson |
|
value: 86.06880877736519 |
|
- type: euclidean_spearman |
|
value: 85.60676988543395 |
|
- type: manhattan_pearson |
|
value: 85.69108036145253 |
|
- type: manhattan_spearman |
|
value: 85.05314281283421 |
|
- task: |
|
type: STS |
|
dataset: |
|
type: mteb/sickr-sts |
|
name: MTEB SICK-R |
|
config: default |
|
split: test |
|
revision: a6ea5a8cab320b040a23452cc28066d9beae2cee |
|
metrics: |
|
- type: cos_sim_pearson |
|
value: 85.61833776000717 |
|
- type: cos_sim_spearman |
|
value: 80.73718686921521 |
|
- type: euclidean_pearson |
|
value: 83.9368704709159 |
|
- type: euclidean_spearman |
|
value: 80.64477415487963 |
|
- type: manhattan_pearson |
|
value: 83.92383757341743 |
|
- type: manhattan_spearman |
|
value: 80.59625506933862 |
|
- task: |
|
type: STS |
|
dataset: |
|
type: mteb/sts12-sts |
|
name: MTEB STS12 |
|
config: default |
|
split: test |
|
revision: a0d554a64d88156834ff5ae9920b964011b16384 |
|
metrics: |
|
- type: cos_sim_pearson |
|
value: 83.81272888013494 |
|
- type: cos_sim_spearman |
|
value: 76.07038564455931 |
|
- type: euclidean_pearson |
|
value: 80.33676600912023 |
|
- type: euclidean_spearman |
|
value: 75.86575335744111 |
|
- type: manhattan_pearson |
|
value: 80.36973770593211 |
|
- type: manhattan_spearman |
|
value: 75.88787860200954 |
|
- task: |
|
type: STS |
|
dataset: |
|
type: mteb/sts13-sts |
|
name: MTEB STS13 |
|
config: default |
|
split: test |
|
revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca |
|
metrics: |
|
- type: cos_sim_pearson |
|
value: 85.58781524090651 |
|
- type: cos_sim_spearman |
|
value: 86.80508359626748 |
|
- type: euclidean_pearson |
|
value: 85.22891409219575 |
|
- type: euclidean_spearman |
|
value: 85.78295876926319 |
|
- type: manhattan_pearson |
|
value: 85.2193177032458 |
|
- type: manhattan_spearman |
|
value: 85.74049940198427 |
|
- task: |
|
type: STS |
|
dataset: |
|
type: mteb/sts14-sts |
|
name: MTEB STS14 |
|
config: default |
|
split: test |
|
revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 |
|
metrics: |
|
- type: cos_sim_pearson |
|
value: 84.0862821699066 |
|
- type: cos_sim_spearman |
|
value: 81.67856196476185 |
|
- type: euclidean_pearson |
|
value: 83.38475353138897 |
|
- type: euclidean_spearman |
|
value: 81.45279784228292 |
|
- type: manhattan_pearson |
|
value: 83.29235221714131 |
|
- type: manhattan_spearman |
|
value: 81.3971683104493 |
|
- task: |
|
type: STS |
|
dataset: |
|
type: mteb/sts15-sts |
|
name: MTEB STS15 |
|
config: default |
|
split: test |
|
revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 |
|
metrics: |
|
- type: cos_sim_pearson |
|
value: 87.44459051393112 |
|
- type: cos_sim_spearman |
|
value: 88.74673154561383 |
|
- type: euclidean_pearson |
|
value: 88.13112382236628 |
|
- type: euclidean_spearman |
|
value: 88.56241954487271 |
|
- type: manhattan_pearson |
|
value: 88.11098632041256 |
|
- type: manhattan_spearman |
|
value: 88.55607051247829 |
|
- task: |
|
type: STS |
|
dataset: |
|
type: mteb/sts16-sts |
|
name: MTEB STS16 |
|
config: default |
|
split: test |
|
revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 |
|
metrics: |
|
- type: cos_sim_pearson |
|
value: 82.8825746257794 |
|
- type: cos_sim_spearman |
|
value: 84.6066555379785 |
|
- type: euclidean_pearson |
|
value: 84.12438131112606 |
|
- type: euclidean_spearman |
|
value: 84.75862802179907 |
|
- type: manhattan_pearson |
|
value: 84.12791217960807 |
|
- type: manhattan_spearman |
|
value: 84.7739597139034 |
|
- task: |
|
type: STS |
|
dataset: |
|
type: mteb/sts17-crosslingual-sts |
|
name: MTEB STS17 (en-en) |
|
config: en-en |
|
split: test |
|
revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d |
|
metrics: |
|
- type: cos_sim_pearson |
|
value: 89.19971502207773 |
|
- type: cos_sim_spearman |
|
value: 89.75109780507901 |
|
- type: euclidean_pearson |
|
value: 89.5913898113725 |
|
- type: euclidean_spearman |
|
value: 89.20244860773123 |
|
- type: manhattan_pearson |
|
value: 89.68755363801112 |
|
- type: manhattan_spearman |
|
value: 89.3105024782381 |
|
- task: |
|
type: STS |
|
dataset: |
|
type: mteb/sts22-crosslingual-sts |
|
name: MTEB STS22 (en) |
|
config: en |
|
split: test |
|
revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 |
|
metrics: |
|
- type: cos_sim_pearson |
|
value: 61.73885819503523 |
|
- type: cos_sim_spearman |
|
value: 64.09521607825829 |
|
- type: euclidean_pearson |
|
value: 64.22116001518724 |
|
- type: euclidean_spearman |
|
value: 63.84189650719827 |
|
- type: manhattan_pearson |
|
value: 64.23930191730729 |
|
- type: manhattan_spearman |
|
value: 63.7536172795383 |
|
- task: |
|
type: STS |
|
dataset: |
|
type: mteb/stsbenchmark-sts |
|
name: MTEB STSBenchmark |
|
config: default |
|
split: test |
|
revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 |
|
metrics: |
|
- type: cos_sim_pearson |
|
value: 85.68505574064375 |
|
- type: cos_sim_spearman |
|
value: 86.87614324154406 |
|
- type: euclidean_pearson |
|
value: 86.96751967489614 |
|
- type: euclidean_spearman |
|
value: 86.78979082790067 |
|
- type: manhattan_pearson |
|
value: 86.92578795715433 |
|
- type: manhattan_spearman |
|
value: 86.74076104131726 |
|
- task: |
|
type: PairClassification |
|
dataset: |
|
type: mteb/sprintduplicatequestions-pairclassification |
|
name: MTEB SprintDuplicateQuestions |
|
config: default |
|
split: test |
|
revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 |
|
metrics: |
|
- type: cos_sim_accuracy |
|
value: 99.80990099009901 |
|
- type: cos_sim_ap |
|
value: 95.00187845875503 |
|
- type: cos_sim_f1 |
|
value: 90.37698412698413 |
|
- type: cos_sim_precision |
|
value: 89.66535433070865 |
|
- type: cos_sim_recall |
|
value: 91.10000000000001 |
|
- type: dot_accuracy |
|
value: 99.63366336633663 |
|
- type: dot_ap |
|
value: 87.6642728041652 |
|
- type: dot_f1 |
|
value: 81.40803173029252 |
|
- type: dot_precision |
|
value: 80.7276302851524 |
|
- type: dot_recall |
|
value: 82.1 |
|
- type: euclidean_accuracy |
|
value: 99.8079207920792 |
|
- type: euclidean_ap |
|
value: 94.88531851782375 |
|
- type: euclidean_f1 |
|
value: 90.49019607843137 |
|
- type: euclidean_precision |
|
value: 88.75 |
|
- type: euclidean_recall |
|
value: 92.30000000000001 |
|
- type: manhattan_accuracy |
|
value: 99.81188118811882 |
|
- type: manhattan_ap |
|
value: 94.87944331919043 |
|
- type: manhattan_f1 |
|
value: 90.5 |
|
- type: manhattan_precision |
|
value: 90.5 |
|
- type: manhattan_recall |
|
value: 90.5 |
|
- type: max_accuracy |
|
value: 99.81188118811882 |
|
- type: max_ap |
|
value: 95.00187845875503 |
|
- type: max_f1 |
|
value: 90.5 |
|
- task: |
|
type: PairClassification |
|
dataset: |
|
type: mteb/twittersemeval2015-pairclassification |
|
name: MTEB TwitterSemEval2015 |
|
config: default |
|
split: test |
|
revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 |
|
metrics: |
|
- type: cos_sim_accuracy |
|
value: 86.3861238600465 |
|
- type: cos_sim_ap |
|
value: 74.50058066578084 |
|
- type: cos_sim_f1 |
|
value: 69.25949774629748 |
|
- type: cos_sim_precision |
|
value: 67.64779874213836 |
|
- type: cos_sim_recall |
|
value: 70.94986807387863 |
|
- type: dot_accuracy |
|
value: 81.57000655659535 |
|
- type: dot_ap |
|
value: 59.10193583653485 |
|
- type: dot_f1 |
|
value: 58.39352155832786 |
|
- type: dot_precision |
|
value: 49.88780852655198 |
|
- type: dot_recall |
|
value: 70.3957783641161 |
|
- type: euclidean_accuracy |
|
value: 86.37420277761221 |
|
- type: euclidean_ap |
|
value: 74.41671247141966 |
|
- type: euclidean_f1 |
|
value: 69.43907156673114 |
|
- type: euclidean_precision |
|
value: 64.07853636769299 |
|
- type: euclidean_recall |
|
value: 75.77836411609499 |
|
- type: manhattan_accuracy |
|
value: 86.30267628300649 |
|
- type: manhattan_ap |
|
value: 74.34438603336339 |
|
- type: manhattan_f1 |
|
value: 69.41888619854721 |
|
- type: manhattan_precision |
|
value: 64.13870246085011 |
|
- type: manhattan_recall |
|
value: 75.64643799472296 |
|
- type: max_accuracy |
|
value: 86.3861238600465 |
|
- type: max_ap |
|
value: 74.50058066578084 |
|
- type: max_f1 |
|
value: 69.43907156673114 |
|
- task: |
|
type: PairClassification |
|
dataset: |
|
type: mteb/twitterurlcorpus-pairclassification |
|
name: MTEB TwitterURLCorpus |
|
config: default |
|
split: test |
|
revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf |
|
metrics: |
|
- type: cos_sim_accuracy |
|
value: 88.87530562347187 |
|
- type: cos_sim_ap |
|
value: 85.69496469410068 |
|
- type: cos_sim_f1 |
|
value: 77.96973052787007 |
|
- type: cos_sim_precision |
|
value: 74.8900865125514 |
|
- type: cos_sim_recall |
|
value: 81.3135201724669 |
|
- type: dot_accuracy |
|
value: 86.70780455621532 |
|
- type: dot_ap |
|
value: 80.03489678512908 |
|
- type: dot_f1 |
|
value: 73.26376129933124 |
|
- type: dot_precision |
|
value: 70.07591733445804 |
|
- type: dot_recall |
|
value: 76.75546658453958 |
|
- type: euclidean_accuracy |
|
value: 88.85978189156674 |
|
- type: euclidean_ap |
|
value: 85.67894953317325 |
|
- type: euclidean_f1 |
|
value: 78.04295942720763 |
|
- type: euclidean_precision |
|
value: 75.67254845241538 |
|
- type: euclidean_recall |
|
value: 80.56667693255312 |
|
- type: manhattan_accuracy |
|
value: 88.88306748942446 |
|
- type: manhattan_ap |
|
value: 85.66556510677526 |
|
- type: manhattan_f1 |
|
value: 78.06278290950576 |
|
- type: manhattan_precision |
|
value: 74.76912231230173 |
|
- type: manhattan_recall |
|
value: 81.65999384046813 |
|
- type: max_accuracy |
|
value: 88.88306748942446 |
|
- type: max_ap |
|
value: 85.69496469410068 |
|
- type: max_f1 |
|
value: 78.06278290950576 |
|
--- |
|
|
|
# bge-large-en-v1.5-sparse |
|
|
|
## Usage |
|
|
|
This is the sparse ONNX variant of the [bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) embeddings model, accelerated with [Sparsify](https://github.com/neuralmagic/sparsify) for quantization/pruning and [DeepSparseSentenceTransformers](https://github.com/neuralmagic/deepsparse/tree/main/src/deepsparse/sentence_transformers) for inference.
|
|
|
```bash
pip install -U deepsparse-nightly[sentence_transformers]
```
|
|
|
```python
from deepsparse.sentence_transformers import DeepSparseSentenceTransformer

model = DeepSparseSentenceTransformer('neuralmagic/bge-large-en-v1.5-sparse', export=False)

# The sentences we want to encode
sentences = ['This framework generates embeddings for each input sentence',
    'Sentences are passed as a list of strings.',
    'The quick brown fox jumps over the lazy dog.']

# Sentences are encoded by calling model.encode()
embeddings = model.encode(sentences)

# Print each sentence with the shape of its embedding
for sentence, embedding in zip(sentences, embeddings):
    print("Sentence:", sentence)
    print("Embedding:", embedding.shape)
    print("")
```
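
The returned embeddings can be compared with cosine similarity, the same measure behind the `cos_sim_*` MTEB metrics listed above. Below is a minimal sketch that continues from the snippet above; it assumes `encode` also accepts a single string (mirroring the sentence-transformers API it follows) and uses NumPy, which is pulled in by the dependencies above.

```python
import numpy as np

# Encode a query and score it against the previously encoded sentences
query_embedding = model.encode('This framework generates embeddings for each input sentence')

def cosine_similarity(a, b):
    # Dot product of the two vectors divided by the product of their norms
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

for sentence, embedding in zip(sentences, embeddings):
    print(f"{cosine_similarity(query_embedding, embedding):.4f}  {sentence}")
```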
|
|
|
For general questions on these models and sparsification methods, reach out to the engineering team on our [community Slack](https://join.slack.com/t/discuss-neuralmagic/shared_invite/zt-q1a1cnvo-YBoICSIw3L1dmQpjBeDurQ). |