mspl committed
Commit 590690a · verified · 1 Parent(s): 3872d12

Add model checkpoints

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed1337/config.json +27 -0
  2. finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed1337/pytorch_model.bin +3 -0
  3. finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed1337/training_args.bin +3 -0
  4. finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed23/config.json +27 -0
  5. finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed23/pytorch_model.bin +3 -0
  6. finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed23/training_args.bin +3 -0
  7. finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed271/config.json +27 -0
  8. finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed271/pytorch_model.bin +3 -0
  9. finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed271/training_args.bin +3 -0
  10. finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed314/config.json +27 -0
  11. finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed314/pytorch_model.bin +3 -0
  12. finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed314/training_args.bin +3 -0
  13. finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed42/config.json +27 -0
  14. finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed42/pytorch_model.bin +3 -0
  15. finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed42/training_args.bin +3 -0
  16. finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed1337/config.json +27 -0
  17. finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed1337/pytorch_model.bin +3 -0
  18. finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed1337/training_args.bin +3 -0
  19. finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed23/config.json +27 -0
  20. finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed23/pytorch_model.bin +3 -0
  21. finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed23/training_args.bin +3 -0
  22. finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed271/config.json +27 -0
  23. finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed271/pytorch_model.bin +3 -0
  24. finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed271/training_args.bin +3 -0
  25. finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed314/config.json +27 -0
  26. finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed314/pytorch_model.bin +3 -0
  27. finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed314/training_args.bin +3 -0
  28. finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed42/config.json +27 -0
  29. finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed42/pytorch_model.bin +3 -0
  30. finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed42/training_args.bin +3 -0
  31. finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed1337/config.json +27 -0
  32. finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed1337/pytorch_model.bin +3 -0
  33. finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed1337/training_args.bin +3 -0
  34. finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed23/config.json +27 -0
  35. finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed23/pytorch_model.bin +3 -0
  36. finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed23/training_args.bin +3 -0
  37. finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed271/config.json +27 -0
  38. finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed271/pytorch_model.bin +3 -0
  39. finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed271/training_args.bin +3 -0
  40. finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed314/config.json +27 -0
  41. finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed314/pytorch_model.bin +3 -0
  42. finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed314/training_args.bin +3 -0
  43. finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed42/config.json +27 -0
  44. finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed42/pytorch_model.bin +3 -0
  45. finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed42/training_args.bin +3 -0
  46. finetuning/bert-base-uncased-lewdYN-finetune_20240423231756-seed1337/config.json +27 -0
  47. finetuning/bert-base-uncased-lewdYN-finetune_20240423231756-seed1337/pytorch_model.bin +3 -0
  48. finetuning/bert-base-uncased-lewdYN-finetune_20240423231756-seed1337/training_args.bin +3 -0
  49. finetuning/bert-base-uncased-lewdYN-finetune_20240423231756-seed23/config.json +27 -0
  50. finetuning/bert-base-uncased-lewdYN-finetune_20240423231756-seed23/pytorch_model.bin +3 -0
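
Each of these checkpoint directories follows the standard Hugging Face layout: a config.json declaring a BertForSequenceClassification model, plus LFS-tracked pytorch_model.bin weights and the training_args.bin used for fine-tuning. The sketch below shows one way to load such a checkpoint; it assumes the repository has been cloned locally with the LFS objects pulled, and loading the tokenizer from the base bert-base-uncased model is an assumption, since no tokenizer files are included in this commit.

from transformers import BertForSequenceClassification, BertTokenizer

# One of the checkpoint directories added in this commit.
ckpt_dir = "finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed1337"

# config.json declares BertForSequenceClassification with
# problem_type "single_label_classification", so from_pretrained restores the
# fine-tuned encoder and classification head from pytorch_model.bin.
model = BertForSequenceClassification.from_pretrained(ckpt_dir)
model.eval()

# No tokenizer files are part of this commit; falling back to the base
# bert-base-uncased tokenizer is an assumption.
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

inputs = tokenizer("example text to classify", return_tensors="pt")
logits = model(**inputs).logits
print(logits.argmax(dim=-1))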
finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed1337/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "./model/bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed1337/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2cab5ba6c3130bab55a5e5a2bca3f73aa3e9234202f88438d99653f9f8dac6b2
+ size 438001134
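
The three added lines above are a Git LFS pointer rather than the weights themselves: oid is the SHA-256 of the real pytorch_model.bin and size is its byte count (about 438 MB). A minimal sketch of resolving the actual file through the Hub cache is shown below; the repo_id is a placeholder, since this page does not show the repository name.

from huggingface_hub import hf_hub_download

# "mspl/finetuned-checkpoints" is a hypothetical repo id; substitute the real one.
weights_path = hf_hub_download(
    repo_id="mspl/finetuned-checkpoints",
    filename="finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed1337/pytorch_model.bin",
)
print(weights_path)  # local cache path of the downloaded 438 MB weight file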
finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed1337/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:399806295d354ce6701020ec998f4433cfd3d14efcb8c562451020be30fdcaf3
+ size 5560
finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed23/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "./model/bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed23/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b849cf5fabd7e1ee63863e6d6e1da9262ced82646473ce3054143276f55c58d
+ size 438001134
finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed23/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aba87b37c663747af7eca46831a443bbb399f4b543b2304147a3fd7213d77bb6
+ size 5496
finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed271/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "./model/bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed271/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33f93ba55d9eb8691e08a5fa3b327e02a054c33e2f9837b27e8c335c2f0daa47
+ size 438001134
finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed271/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7633cb4cd69e3feaa4b551ea26f0fc360848358374cdcc8968d704f97e17871
+ size 5496
finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed314/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "./model/bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed314/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f808146a2954ed1ac855422df20b28bbcbc23eb7b5f45f8eec315c52bb6d7fd
+ size 438001134
finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed314/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c87cbe8f87d765b99e9ae24a804b13e6ca104c74a03c9ad6b9a1c88a26dfd92
+ size 5496
finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed42/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "./model/bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed42/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:207a29f035a86503121e777b8ff5eee44e366310ceb599b62e1b96ccc2cef0fc
+ size 438001134
finetuning/bert-base-uncased-groupYN-finetune_20240423231756-seed42/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94f573b0a6f380ac28bdaa92831942805ec359f1d7c418ab02d8af04915dc72f
+ size 5496
finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed1337/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "./model/bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed1337/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fdcb6b935dd190b3dee24dc71b49562a0d2f1bd9c61cfd3701d7a1818bcdcef1
+ size 438001134
finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed1337/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:276835ceec7980249a7488458ac4efbc4dbe0ee197f20b45754d20ef1e52d4ba
+ size 5496
finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed23/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "./model/bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed23/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fdcb6b935dd190b3dee24dc71b49562a0d2f1bd9c61cfd3701d7a1818bcdcef1
+ size 438001134
finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed23/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4ee9e75abee03f0360fcd55fb830af4f5331965e36ab3a801507a76d00e9b3b
+ size 5496
finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed271/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "./model/bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed271/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fdcb6b935dd190b3dee24dc71b49562a0d2f1bd9c61cfd3701d7a1818bcdcef1
+ size 438001134
finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed271/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39044fe4e50815807a5bea610760870f0baa8367fe589c913fc4c0d7bba11307
+ size 5496
finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed314/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "./model/bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed314/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fdcb6b935dd190b3dee24dc71b49562a0d2f1bd9c61cfd3701d7a1818bcdcef1
+ size 438001134
finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed314/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:695340cbe684bdf68e43174857814c5b447c2a272b76d24b2831a174ccfe8ff5
+ size 5496
finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed42/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "./model/bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed42/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fdcb6b935dd190b3dee24dc71b49562a0d2f1bd9c61cfd3701d7a1818bcdcef1
+ size 438001134
finetuning/bert-base-uncased-ingroupYN-finetune_20240423231756-seed42/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a6d7434074ef5c23ad1370c9b232d21f21813981a219439109c44d597718423
+ size 5496
finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed1337/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "./model/bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed1337/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07282b829855192ed11317ed0c1342d56dccfdbf67d6c47631e61d3c53f88a53
+ size 438001134
finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed1337/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:306ccdf16fdeb3ea9b958ce18d28d4f60d2a765b01393009e47cf5fe3266976d
+ size 5496
finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed23/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "./model/bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed23/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07282b829855192ed11317ed0c1342d56dccfdbf67d6c47631e61d3c53f88a53
+ size 438001134
finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed23/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d31bd7f0e98091737e17cf3b01a8ca1e3a00e09550374739e31c9801d6c5149
+ size 5496
finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed271/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "./model/bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed271/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07282b829855192ed11317ed0c1342d56dccfdbf67d6c47631e61d3c53f88a53
+ size 438001134
finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed271/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16149fba5fa35447996ddbb5c697aa3283b1408ddf16c97d1c5d7f985d6e64e2
+ size 5496
finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed314/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "./model/bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed314/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07282b829855192ed11317ed0c1342d56dccfdbf67d6c47631e61d3c53f88a53
+ size 438001134
finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed314/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d2bbf01165fb1853daeda9d53c8fd2389b5fe3935d35738723158d279445184
+ size 5496
finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed42/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "./model/bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed42/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07282b829855192ed11317ed0c1342d56dccfdbf67d6c47631e61d3c53f88a53
+ size 438001134
finetuning/bert-base-uncased-intentYN-finetune_20240423231756-seed42/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b998beb3ab3eef819a6d36dee3c48dfa9a0dc1cf5586b0b39175f6e1a75d1ab
+ size 5496
finetuning/bert-base-uncased-lewdYN-finetune_20240423231756-seed1337/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "./model/bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
finetuning/bert-base-uncased-lewdYN-finetune_20240423231756-seed1337/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c968e29ae9dcb2ffcfe0efdbe311f8eb97deb2f047b77ab3a773994141ed4f7
+ size 438001134
finetuning/bert-base-uncased-lewdYN-finetune_20240423231756-seed1337/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4810bcae103acb8f0dd28b3a5ac6441fad6fab1834d25ce182297e4e0306332d
+ size 5496
finetuning/bert-base-uncased-lewdYN-finetune_20240423231756-seed23/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "./model/bert-base-uncased",
+   "architectures": [
+     "BertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.1",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 30522
+ }
finetuning/bert-base-uncased-lewdYN-finetune_20240423231756-seed23/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c968e29ae9dcb2ffcfe0efdbe311f8eb97deb2f047b77ab3a773994141ed4f7
+ size 438001134