dima806 committed
Commit e0ca19a
Parent: 393ac17

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
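The commit message indicates the files were pushed with the huggingface_hub client. A minimal sketch of how such a folder upload is typically done is shown below; the local folder path and repository id are placeholders, not values taken from this commit.

```python
# Hypothetical sketch: pushing a local training-output folder to the Hub.
# folder_path and repo_id are placeholders, not read from this commit.
from huggingface_hub import HfApi

api = HfApi()  # assumes a token is already configured, e.g. via `huggingface-cli login`
api.upload_folder(
    folder_path="./results",            # local directory containing the checkpoint-* folders
    repo_id="dima806/your-model-repo",  # placeholder repository id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```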
Files changed (50)
  1. checkpoint-1134/config.json +30 -0
  2. checkpoint-1134/optimizer.pt +3 -0
  3. checkpoint-1134/pytorch_model.bin +3 -0
  4. checkpoint-1134/rng_state.pth +3 -0
  5. checkpoint-1134/scheduler.pt +3 -0
  6. checkpoint-1134/trainer_state.json +0 -0
  7. checkpoint-1134/training_args.bin +3 -0
  8. checkpoint-1512/config.json +30 -0
  9. checkpoint-1512/optimizer.pt +3 -0
  10. checkpoint-1512/pytorch_model.bin +3 -0
  11. checkpoint-1512/rng_state.pth +3 -0
  12. checkpoint-1512/scheduler.pt +3 -0
  13. checkpoint-1512/trainer_state.json +0 -0
  14. checkpoint-1512/training_args.bin +3 -0
  15. checkpoint-1890/config.json +30 -0
  16. checkpoint-1890/optimizer.pt +3 -0
  17. checkpoint-1890/pytorch_model.bin +3 -0
  18. checkpoint-1890/rng_state.pth +3 -0
  19. checkpoint-1890/scheduler.pt +3 -0
  20. checkpoint-1890/trainer_state.json +0 -0
  21. checkpoint-1890/training_args.bin +3 -0
  22. checkpoint-2268/config.json +30 -0
  23. checkpoint-2268/optimizer.pt +3 -0
  24. checkpoint-2268/pytorch_model.bin +3 -0
  25. checkpoint-2268/rng_state.pth +3 -0
  26. checkpoint-2268/scheduler.pt +3 -0
  27. checkpoint-2268/trainer_state.json +0 -0
  28. checkpoint-2268/training_args.bin +3 -0
  29. checkpoint-2646/config.json +30 -0
  30. checkpoint-2646/optimizer.pt +3 -0
  31. checkpoint-2646/pytorch_model.bin +3 -0
  32. checkpoint-2646/rng_state.pth +3 -0
  33. checkpoint-2646/scheduler.pt +3 -0
  34. checkpoint-2646/trainer_state.json +0 -0
  35. checkpoint-2646/training_args.bin +3 -0
  36. checkpoint-3024/config.json +30 -0
  37. checkpoint-3024/optimizer.pt +3 -0
  38. checkpoint-3024/pytorch_model.bin +3 -0
  39. checkpoint-3024/rng_state.pth +3 -0
  40. checkpoint-3024/scheduler.pt +3 -0
  41. checkpoint-3024/trainer_state.json +0 -0
  42. checkpoint-3024/training_args.bin +3 -0
  43. checkpoint-3402/config.json +30 -0
  44. checkpoint-3402/optimizer.pt +3 -0
  45. checkpoint-3402/pytorch_model.bin +3 -0
  46. checkpoint-3402/rng_state.pth +3 -0
  47. checkpoint-3402/scheduler.pt +3 -0
  48. checkpoint-3402/trainer_state.json +0 -0
  49. checkpoint-3402/training_args.bin +3 -0
  50. checkpoint-378/config.json +30 -0
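The checkpoint-&lt;step&gt; directories listed above, each holding config.json, pytorch_model.bin, optimizer.pt, scheduler.pt, rng_state.pth, trainer_state.json, and training_args.bin, follow the layout written by the transformers Trainer when it saves checkpoints. A minimal sketch of a run that would produce such folders is given below; the dataset, hyperparameters, and output directory are illustrative assumptions, not values recovered from this commit.

```python
# Hypothetical sketch: a Trainer run that writes checkpoint-<step> folders
# like the ones listed above. Data and hyperparameters are illustrative only.
from datasets import Dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
model = AutoModelForSequenceClassification.from_pretrained(
    "distilbert-base-cased",
    num_labels=2,
    id2label={0: "NO DISASTER", 1: "DISASTER"},
    label2id={"NO DISASTER": 0, "DISASTER": 1},
)

# Tiny stand-in dataset; the real training data is not part of this commit.
train_ds = Dataset.from_dict(
    {"text": ["Forest fire near La Ronge Sask. Canada", "I love fruits"], "label": [1, 0]}
).map(lambda batch: tokenizer(batch["text"], truncation=True), batched=True)

args = TrainingArguments(
    output_dir="./results",  # checkpoints are written to ./results/checkpoint-<global step>
    save_strategy="epoch",   # one checkpoint per epoch; 378 steps per epoch would be consistent with the names above
    num_train_epochs=2,
    per_device_train_batch_size=16,
)

Trainer(model=model, args=args, train_dataset=train_ds, tokenizer=tokenizer).train()
```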
checkpoint-1134/config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "distilbert-base-cased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "NO DISASTER",
+     "1": "DISASTER"
+   },
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "output_past": true,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.30.2",
+   "vocab_size": 28996
+ }
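The config above describes a DistilBertForSequenceClassification model with a binary DISASTER / NO DISASTER label set. A minimal sketch of loading one of these checkpoints for inference follows; the local checkpoint path and the sample text are placeholders, and the tokenizer is assumed to be distilbert-base-cased, matching "_name_or_path" (the checkpoint folders in this commit do not include tokenizer files).

```python
# Hypothetical sketch: running inference with a saved checkpoint.
# "./checkpoint-1134" is a placeholder local path; the base tokenizer is
# assumed because no tokenizer files are present in the checkpoint folder.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased")
model = AutoModelForSequenceClassification.from_pretrained("./checkpoint-1134")
model.eval()

inputs = tokenizer("Example tweet text goes here", return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(dim=-1))])  # "DISASTER" or "NO DISASTER"
```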
checkpoint-1134/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:288a3a2153c0322b4276e669c96adb82618323aa14dd22a877cdc10d5ff281fc
+ size 526325317
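optimizer.pt and the other large binaries in this commit are stored as Git LFS pointer files: a version line, a sha256 object id, and a byte size. A short sketch for checking that a downloaded file matches its pointer (the local path is a placeholder):

```python
# Hypothetical sketch: verifying a downloaded LFS object against its pointer.
# Compare the printed values with the "oid sha256:..." and "size ..." lines above.
import hashlib
import os

def sha256_of(path: str) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

path = "checkpoint-1134/optimizer.pt"  # placeholder local path
print("oid sha256:" + sha256_of(path))
print("size", os.path.getsize(path))
```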
checkpoint-1134/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf607644ed3f532ddfcf22bf3b6b096f6d857686842d3ded36af6cf515d24b7c
+ size 263167661

checkpoint-1134/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:589ad13af77107a97b9c7365d87d9e8ab0ae8d444ec08c6f1b85dafe8552374c
+ size 14575

checkpoint-1134/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6c1f9635b56ab51528d4beb03bca865e6a9a33739312547fb70a929ef992f64
+ size 627

checkpoint-1134/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff

checkpoint-1134/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7c01d8e107b4a20c0ba0f3692dae4e25d8f1dffe1d23d6e4f4bdf92b87ab5ea
+ size 3899
checkpoint-1512/config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "distilbert-base-cased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "NO DISASTER",
+     "1": "DISASTER"
+   },
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "output_past": true,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.30.2",
+   "vocab_size": 28996
+ }

checkpoint-1512/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87ce52713400f327c5fd03cd733de4a3969a3062515aba0904a292f02e98fe96
+ size 526325317

checkpoint-1512/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c453f3f6a59f2ec8da836d5fdb537bd870363dc090f30d9341e30fb9a06987b
+ size 263167661

checkpoint-1512/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4652bd48bb7c106c661fd71929aac1388a67e4cd1e78571ff090909c5e7dc7d0
+ size 14575

checkpoint-1512/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08dda3a4f935a7a4b13455d1b343f791c393a17cccb53664b36097e59734998c
+ size 627

checkpoint-1512/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff

checkpoint-1512/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7c01d8e107b4a20c0ba0f3692dae4e25d8f1dffe1d23d6e4f4bdf92b87ab5ea
+ size 3899
checkpoint-1890/config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "distilbert-base-cased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "NO DISASTER",
+     "1": "DISASTER"
+   },
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "output_past": true,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.30.2",
+   "vocab_size": 28996
+ }

checkpoint-1890/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d678854dbfdcfa8b908c9332ac25c1f8ef0c3ff62cfa32314a14fdeba3398df
+ size 526325317

checkpoint-1890/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f57911e1cd736481c049e1f00b39d9c6f9570bb5e62277d1525fea2092d3ff61
+ size 263167661

checkpoint-1890/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4736e2b29a3bb716eb6278813e759366aca017726091b3748ed8a0cbd6fca255
+ size 14575

checkpoint-1890/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04b47afbdd02c26529bafa5013aad97cbb6f1bc4bbbff15fd4376d3ffddaeab4
+ size 627

checkpoint-1890/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff

checkpoint-1890/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7c01d8e107b4a20c0ba0f3692dae4e25d8f1dffe1d23d6e4f4bdf92b87ab5ea
+ size 3899
checkpoint-2268/config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "distilbert-base-cased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "NO DISASTER",
+     "1": "DISASTER"
+   },
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "output_past": true,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.30.2",
+   "vocab_size": 28996
+ }

checkpoint-2268/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb00801aa9d975c647b6e878986a419cd3cade5e19595bca603265b4e575932c
+ size 526325317

checkpoint-2268/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aecf74abbd12126248d1ab6b0343cb7f1a8e8ea550da4711db32b4c27f1a460b
+ size 263167661

checkpoint-2268/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a0a36e41b0f6ae09acc41f6d5ee6a72f9ef8ae9bbab3384bc65d0290dfa4975
+ size 14575

checkpoint-2268/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:587b3803bf070c7fd1a5809542f456d09391b47ebc2a66313fffa72515a3cc84
+ size 627

checkpoint-2268/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff

checkpoint-2268/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7c01d8e107b4a20c0ba0f3692dae4e25d8f1dffe1d23d6e4f4bdf92b87ab5ea
+ size 3899
checkpoint-2646/config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "distilbert-base-cased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "NO DISASTER",
+     "1": "DISASTER"
+   },
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "output_past": true,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.30.2",
+   "vocab_size": 28996
+ }

checkpoint-2646/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53912039cd2f47825838734a584e08d2a7dd2446c9cec942224fd1ae50de1e47
+ size 526325317

checkpoint-2646/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8689e4c6d7995e2fae60d4a7f938d919a858337c1e51bab76711830eb6f9063b
+ size 263167661

checkpoint-2646/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4efd6b59e664dd5d3e5b916b9efad5576e8242df7a17314fc41a8cf175dd2d31
+ size 14575

checkpoint-2646/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad115c008906441ed2ca379a9e36021b2662b360f7030e43093fc964b8bf0d60
+ size 627

checkpoint-2646/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff

checkpoint-2646/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7c01d8e107b4a20c0ba0f3692dae4e25d8f1dffe1d23d6e4f4bdf92b87ab5ea
+ size 3899
checkpoint-3024/config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "distilbert-base-cased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "NO DISASTER",
+     "1": "DISASTER"
+   },
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "output_past": true,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.30.2",
+   "vocab_size": 28996
+ }

checkpoint-3024/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4adf5995dfff43bbcf0456a8150fb2939f3bf8c44115a1f81d62b513c5962006
+ size 526325317

checkpoint-3024/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d502e804f64e2acd194387aa5fd310131d70f8506192f89f513436dc2de8916
+ size 263167661

checkpoint-3024/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab62b0bebbe356352a4069309bea70f50837588122439261bc3e8a0e6ce05c23
+ size 14575

checkpoint-3024/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f9daf155d8896ce2ce99e58ebee9b511a9716e2308a91ca6e9dfd99c08653734
+ size 627

checkpoint-3024/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff

checkpoint-3024/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7c01d8e107b4a20c0ba0f3692dae4e25d8f1dffe1d23d6e4f4bdf92b87ab5ea
+ size 3899
checkpoint-3402/config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "distilbert-base-cased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "NO DISASTER",
+     "1": "DISASTER"
+   },
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "output_past": true,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.30.2",
+   "vocab_size": 28996
+ }

checkpoint-3402/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e85edc83ec5ffe29136a805b6f367bf7eb43dff879939b6df7aa705d88753f94
+ size 526325317

checkpoint-3402/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:212663d3b081d1c5839dc0e4fb48b43cc45e577bc88f440512f19584946b9db6
+ size 263167661

checkpoint-3402/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33e0373cf75568e9b513cbf91702bd9f246da33da5b59e14eb78b0282c54ab90
+ size 14575

checkpoint-3402/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:329ec617799a517751e41c3900a932ef850318bdb418eea4fee34fa1016db88a
+ size 627

checkpoint-3402/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff

checkpoint-3402/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7c01d8e107b4a20c0ba0f3692dae4e25d8f1dffe1d23d6e4f4bdf92b87ab5ea
+ size 3899
checkpoint-378/config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_name_or_path": "distilbert-base-cased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "NO DISASTER",
+     "1": "DISASTER"
+   },
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "output_past": true,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.30.2",
+   "vocab_size": 28996
+ }