yco committed · Commit c1d6484 · verified · 1 Parent(s): 2de788d

Add new SentenceTransformer model with an onnx backend


Hello!

*This pull request has been automatically generated from the [`push_to_hub`](https://sbert.net/docs/package_reference/sentence_transformer/SentenceTransformer.html#sentence_transformers.SentenceTransformer.push_to_hub) method from the Sentence Transformers library.*

## Full Model Architecture:
```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: ORTModelForFeatureExtraction
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
  (2): Normalize()
)
```
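
In plain terms: the ONNX transformer emits per-token embeddings (up to 512 tokens), the Pooling module averages them over non-padding tokens into a 768-dimensional vector, and Normalize() L2-normalizes the result. Below is a minimal sketch of that pooling/normalization step, assuming plain `torch` tensors; the function name is illustrative, not code from this repository:

```python
import torch
import torch.nn.functional as F

def pool_and_normalize(token_embeddings: torch.Tensor,
                       attention_mask: torch.Tensor) -> torch.Tensor:
    # Mean pooling (pooling_mode_mean_tokens=True): average token embeddings,
    # counting only non-padding positions from the attention mask.
    mask = attention_mask.unsqueeze(-1).float()    # (batch, seq_len, 1)
    summed = (token_embeddings * mask).sum(dim=1)  # (batch, 768)
    counts = mask.sum(dim=1).clamp(min=1e-9)       # avoid division by zero
    mean_pooled = summed / counts
    # Normalize(): L2-normalize so cosine similarity reduces to a dot product.
    return F.normalize(mean_pooled, p=2, dim=1)
```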

## Tip:
Consider testing this pull request before merging by loading the model from this PR with the `revision` argument:
```python
from sentence_transformers import SentenceTransformer

# TODO: Fill in the PR number
pr_number = 2
model = SentenceTransformer(
"yco/bilingual-embedding-base",
revision=f"refs/pr/{pr_number}",
backend="onnx",
)

# Verify that everything works as expected
embeddings = model.encode(["The weather is lovely today.", "It's so sunny outside!", "He drove to the stadium."])
print(embeddings.shape)

similarities = model.similarity(embeddings, embeddings)
print(similarities)
```
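
For a stronger check than the shape and similarity printout, you can compare the ONNX backend against the default torch backend on the same inputs. A hedged sketch, assuming both backends load in your environment; the fp16 weights and ONNX Runtime kernels can introduce small numeric drift, so inspect the difference rather than expecting exact equality:

```python
import numpy as np
from sentence_transformers import SentenceTransformer

pr_number = 2  # same PR as above
sentences = ["The weather is lovely today.", "It's so sunny outside!"]

onnx_model = SentenceTransformer(
    "yco/bilingual-embedding-base",
    revision=f"refs/pr/{pr_number}",
    backend="onnx",
)
torch_model = SentenceTransformer("yco/bilingual-embedding-base")

onnx_emb = onnx_model.encode(sentences)
torch_emb = torch_model.encode(sentences)

# Largest element-wise deviation between the two backends.
print(np.max(np.abs(onnx_emb - torch_emb)))
```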

config.json CHANGED

```diff
@@ -1,21 +1,20 @@
 {
-  "_name_or_path": "dangvantuan/bilingual_impl",
+  "_name_or_path": "yco/bilingual-embedding-base",
   "architectures": [
     "BilingualModel"
   ],
-  "model_type": "xlm-roberta",
+  "attention_probs_dropout_prob": 0.1,
   "auto_map": {
-    "AutoConfig":"dangvantuan/bilingual_impl--config.BilingualConfig",
+    "AutoConfig": "dangvantuan/bilingual_impl--config.BilingualConfig",
     "AutoModel": "dangvantuan/bilingual_impl--modeling.BilingualModel",
     "AutoModelForMaskedLM": "dangvantuan/bilingual_impl--modeling.BilingualForMaskedLM",
     "AutoModelForMultipleChoice": "dangvantuan/bilingual_impl--modeling.BilingualForMultipleChoice",
     "AutoModelForQuestionAnswering": "dangvantuan/bilingual_impl--modeling.BilingualForQuestionAnswering",
     "AutoModelForSequenceClassification": "dangvantuan/bilingual_impl--modeling.BilingualForSequenceClassification",
     "AutoModelForTokenClassification": "dangvantuan/bilingual_impl--modeling.BilingualForTokenClassification"
   },
-  "attention_probs_dropout_prob": 0.1,
-  "classifier_dropout": null,
   "bos_token_id": 0,
+  "classifier_dropout": null,
   "eos_token_id": 2,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
@@ -24,13 +23,14 @@
   "intermediate_size": 3072,
   "layer_norm_eps": 1e-05,
   "max_position_embeddings": 514,
+  "model_type": "xlm-roberta",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
   "output_past": true,
   "pad_token_id": 1,
   "position_embedding_type": "absolute",
   "torch_dtype": "float16",
-  "transformers_version": "4.39.1",
+  "transformers_version": "4.48.0",
   "type_vocab_size": 1,
   "use_cache": true,
   "vocab_size": 250002
```
config_sentence_transformers.json CHANGED

```diff
@@ -1,9 +1,10 @@
 {
   "__version__": {
-    "sentence_transformers": "2.7.0",
-    "transformers": "4.38.2",
-    "pytorch": "2.2.1+cu121"
+    "sentence_transformers": "4.0.2",
+    "transformers": "4.48.0",
+    "pytorch": "2.3.1"
   },
   "prompts": {},
-  "default_prompt_name": null
+  "default_prompt_name": null,
+  "similarity_fn_name": "cosine"
 }
```
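
The newly recorded `"similarity_fn_name": "cosine"` is what `model.similarity` in the Tip above uses. Because the model ends in a `Normalize()` module, cosine similarity reduces to a plain dot product; a hedged sketch of that consistency check, reusing `model` and `embeddings` from the Tip snippet:

```python
import numpy as np

# With L2-normalized embeddings, cosine similarity equals the dot product.
manual = embeddings @ embeddings.T
print(np.allclose(model.similarity(embeddings, embeddings).numpy(), manual, atol=1e-5))
```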
onnx/model.onnx ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1680e5ce82aabe8e69d9ad6d76a854bbc31dc68ea6a69376b9f2a5e35fdb60f5
+size 1110092472
```
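
This file is an LFS pointer to the ~1.1 GB exported graph. Since the `onnx` backend wraps `ORTModelForFeatureExtraction` (see the architecture above), the export can also be loaded directly through Optimum. A hedged sketch only; `subfolder`, `file_name`, and `trust_remote_code` reflect assumptions about this repository's layout and custom config:

```python
from optimum.onnxruntime import ORTModelForFeatureExtraction

pr_number = 2  # same PR as above
ort_model = ORTModelForFeatureExtraction.from_pretrained(
    "yco/bilingual-embedding-base",
    revision=f"refs/pr/{pr_number}",
    subfolder="onnx",        # where this PR places model.onnx
    file_name="model.onnx",
    trust_remote_code=True,  # custom BilingualConfig via auto_map
)
```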
tokenizer.json CHANGED

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f1cc44ad7faaeec47241864835473fd5403f2da94673f3f764a77ebcb0a803ec
-size 17083009
+oid sha256:883b037111086fd4dfebbbc9b7cee11e1517b5e0c0514879478661440f137085
+size 17082987
```
tokenizer_config.json CHANGED

```diff
@@ -45,10 +45,18 @@
   "clean_up_tokenization_spaces": true,
   "cls_token": "<s>",
   "eos_token": "</s>",
+  "extra_special_tokens": {},
   "mask_token": "<mask>",
+  "max_length": 512,
   "model_max_length": 512,
+  "pad_to_multiple_of": null,
   "pad_token": "<pad>",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
   "sep_token": "</s>",
+  "stride": 0,
   "tokenizer_class": "XLMRobertaTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
   "unk_token": "<unk>"
 }
```
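
The new keys pin the serialized padding/truncation defaults (right-side padding, truncation at 512 tokens) rather than changing tokenizer behavior. A quick hedged check that over-long inputs are truncated as configured:

```python
from transformers import AutoTokenizer

pr_number = 2  # same PR as above
tokenizer = AutoTokenizer.from_pretrained(
    "yco/bilingual-embedding-base",
    revision=f"refs/pr/{pr_number}",
)
ids = tokenizer("word " * 2000, truncation=True)["input_ids"]
print(len(ids))  # expected: 512 (model_max_length)
```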