train STS and NLI datasets (KLUE, KAKAO)
- .gitattributes +1 -0
- config.json +1 -1
- pytorch_model.bin +1 -1
- tokenizer_config.json +1 -1
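
The commit message indicates a fine-tuning run on KLUE/KAKAO STS and NLI data. As a rough illustration only, here is a minimal sketch of how such a run typically looks with the sentence-transformers training API, assuming the klue/roberta-small base referenced in this repo's tokenizer_config.json; the example pairs, labels, batch size, and epoch count are assumptions, not taken from this commit.

# Hypothetical sketch: fine-tune klue/roberta-small for STS with sentence-transformers.
# Example pairs, labels, and hyperparameters are illustrative assumptions.
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

# Base model referenced in tokenizer_config.json; a mean-pooling head is added automatically.
model = SentenceTransformer("klue/roberta-small")

# STS-style supervision: sentence pairs with similarity labels scaled to [0, 1].
# NLI pairs are usually trained separately, e.g. with losses.MultipleNegativesRankingLoss.
train_examples = [
    InputExample(texts=["오늘 날씨가 좋다", "날씨가 참 맑다"], label=0.9),
    InputExample(texts=["오늘 날씨가 좋다", "주식 시장이 하락했다"], label=0.1),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)
train_loss = losses.CosineSimilarityLoss(model)  # regress cosine similarity onto the labels

model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=1)
model.save("/content/drive/MyDrive/sentence_transformers/model2/")  # output path seen in this commit's config.json
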
.gitattributes CHANGED
@@ -26,3 +26,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+.git/lfs/objects/e0/77/e07700d437b808a7a8c43550a67f06949fa1c0663f0bf68d6e194d47663c41ca filter=lfs diff=lfs merge=lfs -text
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/content/drive/MyDrive/sentence_transformers/
+  "_name_or_path": "/content/drive/MyDrive/sentence_transformers/model2/",
   "architectures": [
     "RobertaModel"
   ],
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:a22f800a64ef1368330beb74e6bc69c13c52dc048ab9758f53d6905e47fa5a0f
 size 272407281
tokenizer_config.json CHANGED
@@ -1 +1 @@
-{"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "do_basic_tokenize": true, "never_split": null, "bos_token": "[CLS]", "eos_token": "[SEP]", "model_max_length": 512, "special_tokens_map_file": "/root/.cache/torch/sentence_transformers/klue_roberta-small/special_tokens_map.json", "name_or_path": "/content/drive/MyDrive/sentence_transformers/
+{"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "do_basic_tokenize": true, "never_split": null, "bos_token": "[CLS]", "eos_token": "[SEP]", "model_max_length": 512, "special_tokens_map_file": "/root/.cache/torch/sentence_transformers/klue_roberta-small/special_tokens_map.json", "name_or_path": "/content/drive/MyDrive/sentence_transformers/model2/", "tokenizer_class": "BertTokenizer"}
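
For reference, a minimal usage sketch for a checkpoint like this one, scoring STS-style similarity between two sentences; the local path and the sentences are assumptions, not part of this repo.

# Hypothetical usage sketch; the load path and sentences are illustrative.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("./")  # load from a local clone of this repo
embeddings = model.encode(["첫 번째 문장입니다.", "두 번째 문장입니다."])
score = util.cos_sim(embeddings[0], embeddings[1])  # cosine similarity, as in STS evaluation
print(float(score))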