first commit
- CECorrelationEvaluator_sts-dev_results.csv +41 -0
- config.json +36 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- tokenizer.json +0 -0
- tokenizer_config.json +1 -0
- vocab.txt +0 -0
CECorrelationEvaluator_sts-dev_results.csv
ADDED
@@ -0,0 +1,41 @@
+epoch,steps,Pearson_Correlation,Spearman_Correlation
+0,136,0.7203864865585515,0.7662701566807082
+0,272,0.7856692229301432,0.8068189991516025
+0,408,0.8207631946158755,0.8264061906152335
+0,-1,0.8309692914953929,0.8339018433553216
+1,136,0.8365409691112995,0.8381305750622835
+1,272,0.8427390868951522,0.8424465439207105
+1,408,0.8402965929473044,0.8423964679782459
+1,-1,0.8468602563855128,0.846221321347627
+2,136,0.84818217727024,0.8467257176521318
+2,272,0.8471488745403296,0.8460260539923677
+2,408,0.8491309724242002,0.847398824352393
+2,-1,0.8476484465641093,0.8478949003740454
+3,136,0.8504894496045473,0.849168406315226
+3,272,0.8482807357554237,0.8464362587602215
+3,408,0.8456386291679147,0.8457745464193018
+3,-1,0.8509723995060233,0.8484620278442389
+4,136,0.8494099771903617,0.8471714281568331
+4,272,0.8509888140589064,0.8479015040172948
+4,408,0.8478171240961387,0.8455120717841272
+4,-1,0.8527587955691198,0.8504308473166906
+5,136,0.8478601729709964,0.8460565459072923
+5,272,0.8506573035144734,0.8476763170571476
+5,408,0.8517710616990748,0.8489107111837615
+5,-1,0.8489382275382517,0.8468254274816605
+6,136,0.8516031389859442,0.8485798575719309
+6,272,0.8510802884871553,0.848660298373065
+6,408,0.8492146415634652,0.846611492987912
+6,-1,0.8488078816850624,0.8457574479846395
+7,136,0.8492091115501108,0.8463685995395548
+7,272,0.8516746489419914,0.8492031961148984
+7,408,0.8482869303349043,0.84521395697491
+7,-1,0.8508632517415402,0.8477264139352209
+8,136,0.8490123719021941,0.8456260925660452
+8,272,0.8492321720787408,0.8457834056034438
+8,408,0.8497090847896118,0.8464883039695794
+8,-1,0.849578870014589,0.8464588669542932
+9,136,0.8502257429012771,0.8470815155453882
+9,272,0.8504134768238429,0.8470942388244539
+9,408,0.8497718288056663,0.8464766291984329
+9,-1,0.8499936727195425,0.8466534294462225
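
Note: the CSV above logs dev-set Pearson and Spearman correlations at each evaluation checkpoint during training; the steps column is the training step within the epoch, and -1 marks the end-of-epoch evaluation. A minimal sketch for picking the strongest checkpoint from this log, assuming pandas is installed and the file name is the one added in this commit:

    import pandas as pd

    # Load the per-checkpoint evaluation log added in this commit.
    df = pd.read_csv("CECorrelationEvaluator_sts-dev_results.csv")

    # steps == -1 marks the evaluation run at the end of an epoch.
    best = df.loc[df["Spearman_Correlation"].idxmax()]
    print(f"best dev Spearman {best.Spearman_Correlation:.4f} "
          f"(Pearson {best.Pearson_Correlation:.4f}) "
          f"at epoch {int(best.epoch)}, steps {int(best.steps)}")

On this log the best dev Spearman, about 0.850, is reached at the end of epoch 4.
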
config.json
ADDED
@@ -0,0 +1,36 @@
+{
+  "_name_or_path": "monologg/koelectra-small-v3-discriminator",
+  "architectures": [
+    "ElectraForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "embedding_size": 128,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 256,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 1024,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "electra",
+  "num_attention_heads": 4,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "summary_activation": "gelu",
+  "summary_last_dropout": 0.1,
+  "summary_type": "first",
+  "summary_use_proj": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.19.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 35000
+}
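
Note: config.json describes a small ELECTRA encoder (12 layers, hidden size 256, 4 attention heads, vocab size 35000) initialized from monologg/koelectra-small-v3-discriminator, with an ElectraForSequenceClassification head that has a single label, i.e. one regression-style score per input. A minimal sketch of scoring a sentence pair with plain transformers; the checkpoint path is a placeholder and the Korean example sentences are illustrative only:

    import torch
    from transformers import AutoTokenizer, AutoModelForSequenceClassification

    repo = "path/to/this/checkpoint"  # placeholder: local clone or Hub repo id
    tokenizer = AutoTokenizer.from_pretrained(repo)
    model = AutoModelForSequenceClassification.from_pretrained(repo)
    model.eval()

    # Single-label head (id2label = {"0": "LABEL_0"}): the raw logit is read as a
    # sentence-pair similarity score, matching the CECorrelationEvaluator log above.
    inputs = tokenizer("한 남자가 기타를 치고 있다.", "남자가 악기를 연주한다.",
                       return_tensors="pt", truncation=True, max_length=512)
    with torch.no_grad():
        score = model(**inputs).logits.squeeze().item()
    print(score)
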
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3be9dcdc4ec26e0fd00c4b20391e60c5781c0f770c257247b0c40a6308078e3
+size 56561193
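
Note: pytorch_model.bin is stored via Git LFS, so the commit only records a pointer file: the LFS spec version, the SHA-256 of the actual weights, and their size in bytes (roughly 54 MiB). After fetching the real file (e.g. with git lfs pull), it can be checked against the pointer; a minimal sketch:

    import hashlib
    from pathlib import Path

    # Expected values copied from the LFS pointer above.
    EXPECTED_SHA256 = "b3be9dcdc4ec26e0fd00c4b20391e60c5781c0f770c257247b0c40a6308078e3"
    EXPECTED_SIZE = 56561193

    path = Path("pytorch_model.bin")
    assert path.stat().st_size == EXPECTED_SIZE, "size mismatch: still an LFS pointer?"
    assert hashlib.sha256(path.read_bytes()).hexdigest() == EXPECTED_SHA256, "sha256 mismatch"
    print("pytorch_model.bin matches the LFS pointer")
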
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "monologg/koelectra-small-v3-discriminator", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "ElectraTokenizer"}
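
Note: tokenizer_config.json keeps the cased ElectraTokenizer settings inherited from monologg/koelectra-small-v3-discriminator (do_lower_case false, model_max_length 512); together with special_tokens_map.json, tokenizer.json, and the vocab.txt below it fully defines the tokenizer. A small sanity-check sketch, with the checkpoint path again a placeholder:

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("path/to/this/checkpoint")  # placeholder

    print(type(tokenizer).__name__)              # ELECTRA tokenizer (fast variant when tokenizer.json is used)
    print(tokenizer.model_max_length)            # 512
    print(sorted(tokenizer.all_special_tokens))  # ['[CLS]', '[MASK]', '[PAD]', '[SEP]', '[UNK]']
    print(tokenizer.vocab_size)                  # 35000
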
vocab.txt
ADDED
The diff for this file is too large to render.