Van Tuan DANG committed
Commit 4afd7f7
1 Parent(s): 9588dd4

feat: Add CrossEncoder Model

CECorrelationEvaluator_sts-dev_results.csv ADDED
@@ -0,0 +1,15 @@
+ epoch,steps,Pearson_Correlation,Spearman_Correlation
+ 0,1000,0.883374524387813,0.8878141758572982
+ 0,-1,0.8778756238245011,0.8868591218174223
+ 1,1000,0.8940448549924146,0.8968016856183934
+ 1,-1,0.9029116169622556,0.9012072331426946
+ 2,1000,0.9009231458755066,0.9003885599798547
+ 2,-1,0.9033158313584775,0.9019981749696708
+ 3,1000,0.8998464276084307,0.9020773671951405
+ 3,-1,0.8976356278255467,0.8986613274711471
+ 4,1000,0.8994414640077576,0.9028956951620126
+ 4,-1,0.9034409652005353,0.9014066994725192
+ 5,1000,0.9007225884212331,0.8999936913799494
+ 5,-1,0.8999793261506176,0.8999581415333522
+ 6,1000,0.9021117594963682,0.9008238147286273
+ 6,-1,0.9047506140805446,0.904083037618421
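The CSV above logs Pearson and Spearman correlation on the STS dev set every 1000 training steps and at each epoch end (steps = -1) over 7 epochs; the best Spearman correlation (~0.904) comes at the end of epoch 6. A file with exactly this name and these columns is what sentence-transformers' CECorrelationEvaluator writes during CrossEncoder training. A minimal sketch of such a run, with placeholder sentence pairs standing in for the actual STS data (which is not part of this commit):

# Sketch only: how a file like CECorrelationEvaluator_sts-dev_results.csv
# is produced with sentence-transformers. The two sentence pairs below are
# placeholders, not the real STS training/dev data.
from torch.utils.data import DataLoader
from sentence_transformers import InputExample
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CECorrelationEvaluator

train_samples = [
    InputExample(texts=["Une femme coupe des oignons.",
                        "Une femme émince des oignons."], label=0.9),
    InputExample(texts=["Un homme joue de la guitare.",
                        "Un chat dort sur le canapé."], label=0.0),
]
dev_samples = train_samples  # placeholder; use a held-out STS dev split

# num_labels=1 gives the single-score regression head seen in config.json below
model = CrossEncoder("camembert/camembert-large", num_labels=1)

train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=16)
# name="sts-dev" yields the file name CECorrelationEvaluator_sts-dev_results.csv
evaluator = CECorrelationEvaluator.from_input_examples(dev_samples, name="sts-dev")

model.fit(
    train_dataloader=train_dataloader,
    evaluator=evaluator,
    epochs=7,               # epochs 0-6, matching the CSV
    evaluation_steps=1000,  # the steps=1000 rows; steps=-1 marks epoch end
    warmup_steps=100,
    output_path="output/crossencoder-camembert-large",
)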
config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "_name_or_path": "camembert/camembert-large",
+   "architectures": [
+     "CamembertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "LABEL_0"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "LABEL_0": 0
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "camembert",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.17.0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 32005
+ }
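The config declares a CamembertForSequenceClassification head with a single label (only LABEL_0 in id2label), i.e. a regression head emitting one score per sentence pair: the standard cross-encoder setup for STS. A minimal inference sketch with plain transformers; "path/to/this/repo" is a placeholder for a local clone with the LFS weights pulled:

# Sketch: scoring a sentence pair with the single-logit regression head
# described by config.json. "path/to/this/repo" is a placeholder.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")
model = AutoModelForSequenceClassification.from_pretrained("path/to/this/repo")
model.eval()

inputs = tokenizer(
    "Une femme coupe des oignons.",
    "Une femme émince des oignons.",
    return_tensors="pt",
    truncation=True,
    max_length=512,  # 514 position embeddings minus the 2 RoBERTa-style offset slots
)
with torch.no_grad():
    score = model(**inputs).logits.squeeze().item()  # one logit = similarity score
print(score)

sentence-transformers' CrossEncoder.predict wraps this same forward pass (by default passing the single logit through a sigmoid, so scores land in [0, 1]).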
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6c9a98b5fa4f6e29b887c9a8288c2e9159a8210ba52a1d4a3fb9cf1b313b732
+ size 1346816173
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f98f266fdc548c94216aaadc13ffaaafacf0c8793303e2195322d954549ea261
+ size 808767
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}, "additional_special_tokens": ["<s>NOTUSED", "</s>NOTUSED"]}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "additional_special_tokens": ["<s>NOTUSED", "</s>NOTUSED"], "special_tokens_map_file": null, "name_or_path": "camembert/camembert-large", "sp_model_kwargs": {}, "tokenizer_class": "CamembertTokenizer"}