diwank committed
Commit 880f0cf
1 Parent(s): 46125cb

Add new SentenceTransformer model.

.gitattributes CHANGED
@@ -33,3 +33,12 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+ 2_Asym/140225464236784_Dense/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+ 2_Asym/140228101622896_Dense/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+ 2_Asym/140225464235392_Dense/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+ 2_Asym/140225464235968_Dense/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+ 2_Asym/140225464223632_Dense/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+ 2_Asym/140225464236064_Dense/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+ 2_Asym/140225464231312_Dense/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+ 2_Asym/140225464232416_Dense/pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "word_embedding_dimension": 1024,
+     "pooling_mode_cls_token": true,
+     "pooling_mode_mean_tokens": false,
+     "pooling_mode_max_tokens": false,
+     "pooling_mode_mean_sqrt_len_tokens": false
+ }
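
This pooling config enables CLS-token pooling only: the sentence vector is the hidden state of the first ([CLS]) token, not a mean or max over tokens. A minimal sketch of the operation in plain torch, for illustration:

```python
import torch

def cls_pool(token_embeddings: torch.Tensor) -> torch.Tensor:
    # token_embeddings: (batch, seq_len, 1024) transformer output;
    # CLS pooling keeps only the first token's hidden state per sequence.
    return token_embeddings[:, 0]
```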
2_Asym/140225464223632_Dense/config.json ADDED
@@ -0,0 +1 @@
+ {"in_features": 1024, "out_features": 2048, "bias": true, "activation_function": "torch.nn.modules.activation.Tanh"}
2_Asym/140225464223632_Dense/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e09d27f6e37e79b5889f99320b29eab442407414661f86706af8946f783d0239
+ size 8398460
2_Asym/140225464231312_Dense/config.json ADDED
@@ -0,0 +1 @@
+ {"in_features": 2048, "out_features": 2048, "bias": true, "activation_function": "torch.nn.modules.activation.Tanh"}
2_Asym/140225464231312_Dense/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75db6e76d6b5838f1cd255ba48a1fffb13b807985583df180effbfb73abbfc92
+ size 16787068
2_Asym/140225464232416_Dense/config.json ADDED
@@ -0,0 +1 @@
+ {"in_features": 2048, "out_features": 1024, "bias": true, "activation_function": "torch.nn.modules.activation.Tanh"}
2_Asym/140225464232416_Dense/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f73d9d5ee30e1c9c367620f4b32b59133841a9aa66c34c213c4eceeed1d91739
+ size 8394364
2_Asym/140225464233232_Dropout/config.json ADDED
@@ -0,0 +1 @@
+ {"dropout": 0.1}
2_Asym/140225464235392_Dense/config.json ADDED
@@ -0,0 +1 @@
+ {"in_features": 2048, "out_features": 2048, "bias": true, "activation_function": "torch.nn.modules.activation.Tanh"}
2_Asym/140225464235392_Dense/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53306f081b130f877d2d20e684b403951a08cf410c6854f2fc23f49427d5ba85
+ size 16787068
2_Asym/140225464235968_Dense/config.json ADDED
@@ -0,0 +1 @@
+ {"in_features": 2048, "out_features": 1024, "bias": true, "activation_function": "torch.nn.modules.activation.Tanh"}
2_Asym/140225464235968_Dense/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:762e434b5b7dcfef9ae42841a4193578ecb74d750c5efa5433fd0b129883511f
+ size 8394364
2_Asym/140225464236064_Dense/config.json ADDED
@@ -0,0 +1 @@
+ {"in_features": 2048, "out_features": 2048, "bias": true, "activation_function": "torch.nn.modules.activation.Tanh"}
2_Asym/140225464236064_Dense/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7cc7b864e5a7d41a179fa8660606d0d65e7a0ae3a3ac2076b810ee07234861c
+ size 16787068
2_Asym/140225464236784_Dense/config.json ADDED
@@ -0,0 +1 @@
+ {"in_features": 1024, "out_features": 2048, "bias": true, "activation_function": "torch.nn.modules.activation.Tanh"}
2_Asym/140225464236784_Dense/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2759d8139196720ab46d120d3225231d160bd9fa74f39d7f59bab8a447155db
+ size 8398460
2_Asym/140225507851568_Dropout/config.json ADDED
@@ -0,0 +1 @@
+ {"dropout": 0.1}
2_Asym/140228101622896_Dense/config.json ADDED
@@ -0,0 +1 @@
+ {"in_features": 2048, "out_features": 2048, "bias": true, "activation_function": "torch.nn.modules.activation.Tanh"}
2_Asym/140228101622896_Dense/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c82ba0624007b1e5b7f281fdafa92e7106f7c7f954de0617fcf3ff261bd154f
+ size 16787068
2_Asym/config.json ADDED
@@ -0,0 +1,37 @@
+ {
+     "types": {
+         "140225464236784_Dense": "sentence_transformers.models.Dense",
+         "140228101622896_Dense": "sentence_transformers.models.Dense",
+         "140225507851568_Dropout": "sentence_transformers.models.Dropout",
+         "140225464235392_Dense": "sentence_transformers.models.Dense",
+         "140225464235968_Dense": "sentence_transformers.models.Dense",
+         "140225464231168_Normalize": "sentence_transformers.models.Normalize",
+         "140225464223632_Dense": "sentence_transformers.models.Dense",
+         "140225464236064_Dense": "sentence_transformers.models.Dense",
+         "140225464233232_Dropout": "sentence_transformers.models.Dropout",
+         "140225464231312_Dense": "sentence_transformers.models.Dense",
+         "140225464232416_Dense": "sentence_transformers.models.Dense",
+         "140225464235536_Normalize": "sentence_transformers.models.Normalize"
+     },
+     "structure": {
+         "dialog": [
+             "140225464236784_Dense",
+             "140228101622896_Dense",
+             "140225507851568_Dropout",
+             "140225464235392_Dense",
+             "140225464235968_Dense",
+             "140225464231168_Normalize"
+         ],
+         "fact": [
+             "140225464223632_Dense",
+             "140225464236064_Dense",
+             "140225464233232_Dropout",
+             "140225464231312_Dense",
+             "140225464232416_Dense",
+             "140225464235536_Normalize"
+         ]
+     },
+     "parameters": {
+         "allow_empty_key": false
+     }
+ }
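
This manifest wires up the asymmetric head: two parallel branches keyed `dialog` and `fact`, each a 1024 → 2048 → 2048 (with dropout) → 2048 → 1024 Dense stack ending in L2 normalization, and `allow_empty_key: false` means every input must name a branch. A hedged usage sketch (the example texts are made up):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("diwank/dfe-large-en-2")

# Asym routes each input through the branch named by its dict key;
# with allow_empty_key=false, bare strings would be rejected.
dialog_emb = model.encode([{"dialog": "hey, how have you been?"}])
fact_emb = model.encode([{"fact": "the speaker asked about wellbeing"}])
print(dialog_emb.shape, fact_emb.shape)  # each branch emits 1024-d normalized vectors
```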
README.md ADDED
@@ -0,0 +1,106 @@
+ ---
+ pipeline_tag: sentence-similarity
+ tags:
+ - sentence-transformers
+ - feature-extraction
+ - sentence-similarity
+
+ ---
+
+ # diwank/dfe-large-en-2
+
+ This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 1024-dimensional dense vector space and can be used for tasks like clustering or semantic search.
+
+ <!--- Describe your model here -->
+
+ ## Usage (Sentence-Transformers)
+
+ Using this model is easy once you have [sentence-transformers](https://www.SBERT.net) installed:
+
+ ```
+ pip install -U sentence-transformers
+ ```
+
+ Then you can use the model like this:
+
+ ```python
+ from sentence_transformers import SentenceTransformer
+ sentences = ["This is an example sentence", "Each sentence is converted"]
+
+ model = SentenceTransformer('diwank/dfe-large-en-2')
+ embeddings = model.encode(sentences)
+ print(embeddings)
+ ```
+
+ ## Evaluation Results
+
+ <!--- Describe how your model was evaluated -->
+
+ For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=diwank/dfe-large-en-2)
+
+ ## Training
+ The model was trained with the parameters:
+
+ **DataLoader**:
+
+ `torch.utils.data.dataloader.DataLoader` of length 3633 with parameters:
+ ```
+ {'batch_size': 1024, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
+ ```
+
+ **Loss**:
+
+ `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss`
+
+ Parameters of the `fit()` method:
+ ```
+ {
+     "epochs": 4,
+     "evaluation_steps": 2000,
+     "evaluator": "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.EmbeddingSimilarityEvaluator",
+     "max_grad_norm": 1,
+     "optimizer_class": "<class 'lion_pytorch.lion_pytorch.Lion'>",
+     "optimizer_params": {
+         "lr": 0.0001,
+         "weight_decay": 0.01
+     },
+     "scheduler": "WarmupCosine",
+     "steps_per_epoch": null,
+     "warmup_steps": 100,
+     "weight_decay": 0.01
+ }
+ ```
+
+ ## Full Model Architecture
+ ```
+ SentenceTransformer(
+   (0): Transformer({'max_seq_length': 512, 'do_lower_case': True}) with Transformer model: BertModel
+   (1): Pooling({'word_embedding_dimension': 1024, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
+   (2): Asym(
+     (dialog-0): Dense({'in_features': 1024, 'out_features': 2048, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
+     (dialog-1): Dense({'in_features': 2048, 'out_features': 2048, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
+     (dialog-2): Dropout(
+       (dropout_layer): Dropout(p=0.1, inplace=False)
+     )
+     (dialog-3): Dense({'in_features': 2048, 'out_features': 2048, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
+     (dialog-4): Dense({'in_features': 2048, 'out_features': 1024, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
+     (dialog-5): Normalize()
+     (fact-0): Dense({'in_features': 1024, 'out_features': 2048, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
+     (fact-1): Dense({'in_features': 2048, 'out_features': 2048, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
+     (fact-2): Dropout(
+       (dropout_layer): Dropout(p=0.1, inplace=False)
+     )
+     (fact-3): Dense({'in_features': 2048, 'out_features': 2048, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
+     (fact-4): Dense({'in_features': 2048, 'out_features': 1024, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'})
+     (fact-5): Normalize()
+   )
+ )
+ ```
+
+ ## Citing & Authors
+
+ <!--- Describe where people can find more information -->
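
The Training section above maps directly onto `SentenceTransformer.fit()`. Below is a minimal sketch of an equivalent call, assuming the `lion-pytorch` package is installed and substituting a toy dataset for the original training data, which this repo does not disclose:

```python
from torch.utils.data import DataLoader
from lion_pytorch import Lion
from sentence_transformers import InputExample, SentenceTransformer, losses

# Toy stand-in for the real training data (3633 batches of 1024 in the original run).
train_examples = [
    InputExample(texts=["This is an example sentence", "Each sentence is converted"], label=0.8),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=1)

model = SentenceTransformer("BAAI/bge-large-en-v1.5")  # base model per config.json
train_loss = losses.CosineSimilarityLoss(model)

model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=4,
    scheduler="WarmupCosine",
    warmup_steps=100,
    optimizer_class=Lion,
    optimizer_params={"lr": 1e-4, "weight_decay": 0.01},
    weight_decay=0.01,
    max_grad_norm=1,
)
```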
added_tokens.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "[CLS]": 101,
+     "[MASK]": 103,
+     "[PAD]": 0,
+     "[SEP]": 102,
+     "[UNK]": 100
+ }
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+     "_name_or_path": "/root/.cache/torch/sentence_transformers/BAAI_bge-large-en-v1.5/",
+     "architectures": [
+         "BertModel"
+     ],
+     "attention_probs_dropout_prob": 0.1,
+     "classifier_dropout": null,
+     "gradient_checkpointing": false,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.1,
+     "hidden_size": 1024,
+     "id2label": {
+         "0": "LABEL_0"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 4096,
+     "label2id": {
+         "LABEL_0": 0
+     },
+     "layer_norm_eps": 1e-12,
+     "max_position_embeddings": 512,
+     "model_type": "bert",
+     "num_attention_heads": 16,
+     "num_hidden_layers": 24,
+     "pad_token_id": 0,
+     "position_embedding_type": "absolute",
+     "torch_dtype": "float32",
+     "transformers_version": "4.34.0",
+     "type_vocab_size": 2,
+     "use_cache": true,
+     "vocab_size": 30522
+ }
config_sentence_transformers.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "__version__": {
+         "sentence_transformers": "2.2.2",
+         "transformers": "4.28.1",
+         "pytorch": "1.13.0+cu117"
+     }
+ }
modules.json ADDED
@@ -0,0 +1,20 @@
+ [
+     {
+         "idx": 0,
+         "name": "0",
+         "path": "",
+         "type": "sentence_transformers.models.Transformer"
+     },
+     {
+         "idx": 1,
+         "name": "1",
+         "path": "1_Pooling",
+         "type": "sentence_transformers.models.Pooling"
+     },
+     {
+         "idx": 2,
+         "name": "2",
+         "path": "2_Asym",
+         "type": "sentence_transformers.models.Asym"
+     }
+ ]
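
`modules.json` is the manifest `SentenceTransformer` uses to assemble the pipeline: module 0 is the Transformer at the repo root, module 1 the Pooling layer in `1_Pooling/`, module 2 the Asym block in `2_Asym/`. A sketch of the equivalent manual assembly, assuming a local clone of this repo at `./dfe-large-en-2` (the one-line constructor normally does all of this for you):

```python
from sentence_transformers import SentenceTransformer, models

local_dir = "./dfe-large-en-2"  # hypothetical local clone of this repo

# Mirror modules.json: idx 0 -> Transformer, idx 1 -> Pooling, idx 2 -> Asym.
transformer = models.Transformer(local_dir, max_seq_length=512)
pooling = models.Pooling(
    transformer.get_word_embedding_dimension(),  # 1024
    pooling_mode_cls_token=True,
    pooling_mode_mean_tokens=False,
)
asym = models.Asym.load(f"{local_dir}/2_Asym")

model = SentenceTransformer(modules=[transformer, pooling, asym])
```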
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b4674c2135c4571743ae1c8ac2c8aa857cfdbd69eb18ecc2f39b57063463452
+ size 1340699814
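
Like the other binaries in this commit, only a Git LFS pointer is stored in the diff; the actual ~1.34 GB weight file lives in LFS. One way to materialize it without a full `git lfs` clone is via `huggingface_hub` (a sketch, assuming the package is installed):

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and downloads the real weight file into the local cache.
path = hf_hub_download(repo_id="diwank/dfe-large-en-2", filename="pytorch_model.bin")
print(path)
```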
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+     "max_seq_length": 512,
+     "do_lower_case": true
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "cls_token": "[CLS]",
+     "mask_token": "[MASK]",
+     "pad_token": "[PAD]",
+     "sep_token": "[SEP]",
+     "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+ {
+     "added_tokens_decoder": {
+         "0": {
+             "content": "[PAD]",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "100": {
+             "content": "[UNK]",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "101": {
+             "content": "[CLS]",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "102": {
+             "content": "[SEP]",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "103": {
+             "content": "[MASK]",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         }
+     },
+     "additional_special_tokens": [],
+     "clean_up_tokenization_spaces": true,
+     "cls_token": "[CLS]",
+     "do_basic_tokenize": true,
+     "do_lower_case": true,
+     "mask_token": "[MASK]",
+     "model_max_length": 512,
+     "never_split": null,
+     "pad_token": "[PAD]",
+     "sep_token": "[SEP]",
+     "strip_accents": null,
+     "tokenize_chinese_chars": true,
+     "tokenizer_class": "BertTokenizer",
+     "unk_token": "[UNK]"
+ }
vocab.txt ADDED
The diff for this file is too large to render. See raw diff