yongzx committed
Commit
8306627
1 Parent(s): a430e9f

rm --cached *
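For context: git rm --cached removes files from the index (untracks them) while leaving the working-tree copies on disk, which is why every file below appears as deleted without any content being rewritten. A minimal sketch of the usual untrack-and-ignore sequence, assuming that was the intent here (the .gitignore pattern is illustrative):

    git rm -r --cached .                 # untrack everything; files stay on disk
    echo "checkpoint-*/" >> .gitignore   # stop tracking checkpoint dirs going forward
    git add .gitignore
    git commit -m "rm --cached *"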

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +0 -41
  2. .gitignore +0 -11
  3. all_results.json +0 -14
  4. checkpoint-10000/config.json +0 -39
  5. checkpoint-10000/optimizer.pt +0 -3
  6. checkpoint-10000/pytorch_model.bin +0 -3
  7. checkpoint-10000/rng_state.pth +0 -3
  8. checkpoint-10000/scheduler.pt +0 -3
  9. checkpoint-10000/special_tokens_map.json +0 -1
  10. checkpoint-10000/tokenizer.json +0 -3
  11. checkpoint-10000/tokenizer_config.json +0 -1
  12. checkpoint-10000/trainer_state.json +0 -56
  13. checkpoint-10000/training_args.bin +0 -3
  14. checkpoint-10000/wikiann-az-results.txt +0 -8
  15. checkpoint-12500/config.json +0 -39
  16. checkpoint-12500/optimizer.pt +0 -3
  17. checkpoint-12500/pytorch_model.bin +0 -3
  18. checkpoint-12500/rng_state.pth +0 -3
  19. checkpoint-12500/scheduler.pt +0 -3
  20. checkpoint-12500/special_tokens_map.json +0 -1
  21. checkpoint-12500/tokenizer.json +0 -3
  22. checkpoint-12500/tokenizer_config.json +0 -1
  23. checkpoint-12500/trainer_state.json +0 -86
  24. checkpoint-12500/training_args.bin +0 -3
  25. checkpoint-12500/wikiann-az-results.txt +0 -8
  26. checkpoint-15000/config.json +0 -39
  27. checkpoint-15000/optimizer.pt +0 -3
  28. checkpoint-15000/pytorch_model.bin +0 -3
  29. checkpoint-15000/rng_state.pth +0 -3
  30. checkpoint-15000/scheduler.pt +0 -3
  31. checkpoint-15000/special_tokens_map.json +0 -1
  32. checkpoint-15000/tokenizer.json +0 -3
  33. checkpoint-15000/tokenizer_config.json +0 -1
  34. checkpoint-15000/trainer_state.json +0 -76
  35. checkpoint-15000/training_args.bin +0 -3
  36. checkpoint-20000/config.json +0 -39
  37. checkpoint-20000/optimizer.pt +0 -3
  38. checkpoint-20000/pytorch_model.bin +0 -3
  39. checkpoint-20000/rng_state.pth +0 -3
  40. checkpoint-20000/scheduler.pt +0 -3
  41. checkpoint-20000/special_tokens_map.json +0 -1
  42. checkpoint-20000/tokenizer.json +0 -3
  43. checkpoint-20000/tokenizer_config.json +0 -1
  44. checkpoint-20000/trainer_state.json +0 -96
  45. checkpoint-20000/training_args.bin +0 -3
  46. checkpoint-2500/config.json +0 -39
  47. checkpoint-2500/optimizer.pt +0 -3
  48. checkpoint-2500/pytorch_model.bin +0 -3
  49. checkpoint-2500/rng_state.pth +0 -3
  50. checkpoint-2500/scheduler.pt +0 -3
.gitattributes DELETED
@@ -1,41 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- checkpoint-10000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
- checkpoint-12500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
- checkpoint-15000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
- checkpoint-20000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
- checkpoint-2500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
- checkpoint-25000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
- checkpoint-5000/tokenizer.json filter=lfs diff=lfs merge=lfs -text
- checkpoint-7500/tokenizer.json filter=lfs diff=lfs merge=lfs -text
- tokenizer.json filter=lfs diff=lfs merge=lfs -text
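Each deleted rule above is a Git LFS tracking entry: files matching the pattern are stored as small LFS pointers rather than raw blobs. Entries in this form are normally appended by the LFS CLI rather than written by hand, e.g. (a sketch, not taken from this repo's history):

    git lfs track "*.bin"                           # adds the *.bin rule seen above
    git lfs track "checkpoint-2500/tokenizer.json"  # per-file rule, as in the later entries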
.gitignore DELETED
@@ -1,11 +0,0 @@
- */pilot_*/
- pilot_*/
- checkpoint-*/
- */pilot_*/
- pilot_*/
- checkpoint-*/
- */pilot_*/
- pilot_*/
- checkpoint-*/
- */pilot_*/
- pilot_*/
all_results.json DELETED
@@ -1,14 +0,0 @@
- {
- "epoch": 2.13,
- "eval_loss": 3.5801477432250977,
- "eval_runtime": 484.8289,
- "eval_samples": 4608,
- "eval_samples_per_second": 9.504,
- "eval_steps_per_second": 4.752,
- "perplexity": 35.87884131122737,
- "train_loss": 3.7238579296875,
- "train_runtime": 52437.7197,
- "train_samples": 94080,
- "train_samples_per_second": 3.814,
- "train_steps_per_second": 0.477
- }
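Sanity check: the perplexity field is exp(eval_loss), since exp(3.5801477) ≈ 35.8788, which matches the reported 35.87884131122737.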
checkpoint-10000/config.json DELETED
@@ -1,39 +0,0 @@
- {
- "_name_or_path": "bigscience/bloom-350m",
- "adapters": {
- "adapters": {},
- "config_map": {},
- "fusion_config_map": {},
- "fusions": {}
- },
- "apply_residual_connection_post_layernorm": false,
- "architectures": [
- "BloomForCausalLM"
- ],
- "attention_dropout": 0.0,
- "attention_softmax_in_fp32": true,
- "bias_dropout_fusion": true,
- "bos_token_id": 1,
- "eos_token_id": 2,
- "hidden_dropout": 0.0,
- "hidden_size": 1024,
- "initializer_range": 0.02,
- "layer_norm_epsilon": 1e-05,
- "masked_softmax_fusion": true,
- "model_type": "bloom",
- "n_head": 16,
- "n_inner": null,
- "n_layer": 24,
- "offset_alibi": 100,
- "pad_token_id": 3,
- "pretraining_tp": 1,
- "seq_length": 2048,
- "skip_bias_add": true,
- "skip_bias_add_qkv": false,
- "slow_but_exact": false,
- "torch_dtype": "float32",
- "transformers_version": "4.20.0.dev0",
- "unk_token_id": 0,
- "use_cache": true,
- "vocab_size": 250880
- }
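This config pins the base model bigscience/bloom-350m: 24 layers, hidden size 1024, 16 attention heads, and a 250,880-token vocabulary. The empty "adapters"/"config_map"/"fusions" block is not standard BloomConfig output; it suggests the checkpoint was written by an adapter-transformers-style fork of transformers 4.20.0.dev0.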
checkpoint-10000/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:265ff09d0d7e02defabfebb992d27553e6608e9f187ed59c40d682194d9182de
- size 2254269
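The three lines above are a Git LFS pointer, not the tensors themselves: "oid" is the SHA-256 of the stored object and "size" its byte count. Note the optimizer state is only about 2.25 MB against the roughly 2.24 GB pytorch_model.bin below, consistent with the BitFit setup named in the checkpoint path, where only bias parameters are trained and therefore only they carry optimizer state.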
checkpoint-10000/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c5d0e81d3ced31978dd8a1717bf280d51acf1b2e014447b159c53fb5c1abeaa8
- size 2236955191
checkpoint-10000/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:674676e662eeb93778c2b153ffad13aa90b43355da1956ce0b1e01e72f48c8d7
- size 14503
checkpoint-10000/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ebae5cf74f470a9dc57b090feb9de29d57aa2d381061d1a61fd32b3c3221556b
- size 623
checkpoint-10000/special_tokens_map.json DELETED
@@ -1 +0,0 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
checkpoint-10000/tokenizer.json DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8f6efc66e73f1fd69da4f436e48befb519fdff3fe18910850c1d41bd862293a5
- size 14500443
checkpoint-10000/tokenizer_config.json DELETED
@@ -1 +0,0 @@
- {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "add_prefix_space": false, "name_or_path": "bigscience/bloom-350m", "special_tokens_map_file": null, "padding_side": "left", "tokenizer_class": "BloomTokenizer"}
checkpoint-10000/trainer_state.json DELETED
@@ -1,56 +0,0 @@
- {
- "best_metric": 3.7074594497680664,
- "best_model_checkpoint": "/users/zyong2/data/zyong2/bigscience/data/processed/024/bloom-350m_az_bitfit_100000samples_-1vocab_original-frozen/checkpoint-10000",
- "epoch": 0.8503401360544217,
- "global_step": 10000,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
- {
- "epoch": 0.21,
- "learning_rate": 9e-05,
- "loss": 4.2702,
- "step": 2500
- },
- {
- "epoch": 0.43,
- "learning_rate": 8e-05,
- "loss": 3.9326,
- "step": 5000
- },
- {
- "epoch": 0.43,
- "eval_loss": 3.8811988830566406,
- "eval_runtime": 485.7533,
- "eval_samples_per_second": 9.486,
- "eval_steps_per_second": 4.743,
- "step": 5000
- },
- {
- "epoch": 0.64,
- "learning_rate": 7e-05,
- "loss": 3.7946,
- "step": 7500
- },
- {
- "epoch": 0.85,
- "learning_rate": 6e-05,
- "loss": 3.7099,
- "step": 10000
- },
- {
- "epoch": 0.85,
- "eval_loss": 3.7074594497680664,
- "eval_runtime": 487.18,
- "eval_samples_per_second": 9.459,
- "eval_steps_per_second": 4.729,
- "step": 10000
- }
- ],
- "max_steps": 25000,
- "num_train_epochs": 3,
- "total_flos": 1.4859311775744e+17,
- "trial_name": null,
- "trial_params": null
- }
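The logged rates are consistent with linear decay from a 1e-4 peak to 0 over max_steps = 25000: the learning rate drops by 1e-5 every 2500 steps (9e-05 at step 2500, 8e-05 at 5000, 6e-05 at 10000).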
checkpoint-10000/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d14f6f41cf21f30f5eb683a0b9414800094d15f898e0254a9c31393958aed209
- size 3375
checkpoint-10000/wikiann-az-results.txt DELETED
@@ -1,8 +0,0 @@
- ==================================================
- Results
- ==================================================
- Model: /users/zyong2/data/zyong2/bigscience/data/processed/024/bloom-350m_az_bitfit_100000samples_-1vocab_original-frozen/checkpoint-10000
- [0.3887915936952715, 0.36485532815808047, 0.36906854130052724, 0.3748698368622006, 0.39708939708939706, 0.39721739130434774, 0.3788668752172402, 0.37992831541218636, 0.361812521618817, 0.3837612768910479]
- 37.96
- 1.18
- ==================================================
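The bracketed values appear to be per-seed scores from 10 evaluation runs, plausibly span F1 on WikiANN Azerbaijani NER; 37.96 and 1.18 match their mean and population standard deviation expressed in percent.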
checkpoint-12500/config.json DELETED
@@ -1,39 +0,0 @@
- {
- "_name_or_path": "bigscience/bloom-350m",
- "adapters": {
- "adapters": {},
- "config_map": {},
- "fusion_config_map": {},
- "fusions": {}
- },
- "apply_residual_connection_post_layernorm": false,
- "architectures": [
- "BloomForCausalLM"
- ],
- "attention_dropout": 0.0,
- "attention_softmax_in_fp32": true,
- "bias_dropout_fusion": true,
- "bos_token_id": 1,
- "eos_token_id": 2,
- "hidden_dropout": 0.0,
- "hidden_size": 1024,
- "initializer_range": 0.02,
- "layer_norm_epsilon": 1e-05,
- "masked_softmax_fusion": true,
- "model_type": "bloom",
- "n_head": 16,
- "n_inner": null,
- "n_layer": 24,
- "offset_alibi": 100,
- "pad_token_id": 3,
- "pretraining_tp": 1,
- "seq_length": 2048,
- "skip_bias_add": true,
- "skip_bias_add_qkv": false,
- "slow_but_exact": false,
- "torch_dtype": "float32",
- "transformers_version": "4.20.0.dev0",
- "unk_token_id": 0,
- "use_cache": true,
- "vocab_size": 250880
- }
checkpoint-12500/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:07596e05ce7be5596893cfe284dcd6e75b3e1b0cc9c8e73fdce06c61be54c644
- size 2254269
checkpoint-12500/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:0ba9d8972e149d1195aa73c6870d8922e3881565ca843eb16ab4f2f87f626f74
- size 2236955191
checkpoint-12500/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:84838ccd45951bbe1e5f964cffc9f60572ee4f5e111f59de86b1a56f3d7a6bd8
- size 14503
checkpoint-12500/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d270c6e3000cbdb534f7db7e774ca17393c2523690c8058754d752dd5b11a93a
- size 623
checkpoint-12500/special_tokens_map.json DELETED
@@ -1 +0,0 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
checkpoint-12500/tokenizer.json DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8f6efc66e73f1fd69da4f436e48befb519fdff3fe18910850c1d41bd862293a5
- size 14500443
checkpoint-12500/tokenizer_config.json DELETED
@@ -1 +0,0 @@
- {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "add_prefix_space": false, "name_or_path": "bigscience/bloom-350m", "special_tokens_map_file": null, "padding_side": "left", "tokenizer_class": "BloomTokenizer"}
checkpoint-12500/trainer_state.json DELETED
@@ -1,86 +0,0 @@
- {
- "best_metric": 3.255730152130127,
- "best_model_checkpoint": "/users/zyong2/data/zyong2/bigscience/data/processed/024/bloom-350m_az_bitfit_100000samples_-1vocab_original-frozen/checkpoint-12500",
- "epoch": 1.0629251700680271,
- "global_step": 12500,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
- {
- "epoch": 0.21,
- "learning_rate": 0.0008,
- "loss": 3.7096,
- "step": 2500
- },
- {
- "epoch": 0.21,
- "eval_loss": 3.477477550506592,
- "eval_runtime": 486.1756,
- "eval_samples_per_second": 9.478,
- "eval_steps_per_second": 4.739,
- "step": 2500
- },
- {
- "epoch": 0.43,
- "learning_rate": 0.0006,
- "loss": 3.3667,
- "step": 5000
- },
- {
- "epoch": 0.43,
- "eval_loss": 3.3522424697875977,
- "eval_runtime": 486.8305,
- "eval_samples_per_second": 9.465,
- "eval_steps_per_second": 4.733,
- "step": 5000
- },
- {
- "epoch": 0.64,
- "learning_rate": 0.0004,
- "loss": 3.29,
- "step": 7500
- },
- {
- "epoch": 0.64,
- "eval_loss": 3.3035173416137695,
- "eval_runtime": 485.9329,
- "eval_samples_per_second": 9.483,
- "eval_steps_per_second": 4.741,
- "step": 7500
- },
- {
- "epoch": 0.85,
- "learning_rate": 0.0002,
- "loss": 3.2496,
- "step": 10000
- },
- {
- "epoch": 0.85,
- "eval_loss": 3.269397497177124,
- "eval_runtime": 485.4366,
- "eval_samples_per_second": 9.492,
- "eval_steps_per_second": 4.746,
- "step": 10000
- },
- {
- "epoch": 1.06,
- "learning_rate": 0.0,
- "loss": 3.2319,
- "step": 12500
- },
- {
- "epoch": 1.06,
- "eval_loss": 3.255730152130127,
- "eval_runtime": 485.5726,
- "eval_samples_per_second": 9.49,
- "eval_steps_per_second": 4.745,
- "step": 12500
- }
- ],
- "max_steps": 12500,
- "num_train_epochs": 2,
- "total_flos": 1.857413971968e+17,
- "trial_name": null,
- "trial_params": null
- }
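Unlike the checkpoint-10000 state above, this trainer_state records a run with max_steps = 12500 and a learning rate decaying linearly from a 1e-3 peak (0.0008 at step 2500 down to 0.0 at 12500), so the directory appears to mix checkpoints from more than one training configuration.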
checkpoint-12500/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:3803a6357491b3295e37b75071a44122ab2d037bb565a91e83c16087b12fef18
- size 3375
checkpoint-12500/wikiann-az-results.txt DELETED
@@ -1,8 +0,0 @@
- ==================================================
- Results
- ==================================================
- Model: /users/zyong2/data/zyong2/bigscience/data/processed/024/bloom-350m_az_bitfit_100000samples_-1vocab_original-frozen/checkpoint-12500
- [0.3867132867132867, 0.35957921581128466, 0.3694581280788177, 0.37612768910478833, 0.39779005524861877, 0.3986135181975736, 0.36903939184519696, 0.3745907007203667, 0.36721991701244816, 0.38253638253638256]
- 37.82
- 1.24
- ==================================================
checkpoint-15000/config.json DELETED
@@ -1,39 +0,0 @@
- {
- "_name_or_path": "bigscience/bloom-350m",
- "adapters": {
- "adapters": {},
- "config_map": {},
- "fusion_config_map": {},
- "fusions": {}
- },
- "apply_residual_connection_post_layernorm": false,
- "architectures": [
- "BloomForCausalLM"
- ],
- "attention_dropout": 0.0,
- "attention_softmax_in_fp32": true,
- "bias_dropout_fusion": true,
- "bos_token_id": 1,
- "eos_token_id": 2,
- "hidden_dropout": 0.0,
- "hidden_size": 1024,
- "initializer_range": 0.02,
- "layer_norm_epsilon": 1e-05,
- "masked_softmax_fusion": true,
- "model_type": "bloom",
- "n_head": 16,
- "n_inner": null,
- "n_layer": 24,
- "offset_alibi": 100,
- "pad_token_id": 3,
- "pretraining_tp": 1,
- "seq_length": 2048,
- "skip_bias_add": true,
- "skip_bias_add_qkv": false,
- "slow_but_exact": false,
- "torch_dtype": "float32",
- "transformers_version": "4.20.0.dev0",
- "unk_token_id": 0,
- "use_cache": true,
- "vocab_size": 250880
- }
checkpoint-15000/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a395cb43c406f8705960f0663d5fd2c4c092932797966f26d35e97952836ca33
- size 2254269
checkpoint-15000/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:062822f9e2819dc1e9393a95d333476433854c3dcf75ada844ef378ee9204148
- size 2236955191
checkpoint-15000/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:69a15e581e7afd6fd12d6dddef3da31e19b3dd058003b5c5d00781b54e093f7c
- size 14503
checkpoint-15000/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:301727affc0c0a4c1f25106f7fd12c059ede0526ba52733c25be949ad3bc04d7
- size 623
checkpoint-15000/special_tokens_map.json DELETED
@@ -1 +0,0 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
checkpoint-15000/tokenizer.json DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8f6efc66e73f1fd69da4f436e48befb519fdff3fe18910850c1d41bd862293a5
- size 14500443
checkpoint-15000/tokenizer_config.json DELETED
@@ -1 +0,0 @@
- {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "add_prefix_space": false, "name_or_path": "bigscience/bloom-350m", "special_tokens_map_file": null, "padding_side": "left", "tokenizer_class": "BloomTokenizer"}
checkpoint-15000/trainer_state.json DELETED
@@ -1,76 +0,0 @@
- {
- "best_metric": 3.6284306049346924,
- "best_model_checkpoint": "/users/zyong2/data/zyong2/bigscience/data/processed/024/bloom-350m_az_bitfit_100000samples_-1vocab_original-frozen/checkpoint-15000",
- "epoch": 1.2755102040816326,
- "global_step": 15000,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
- {
- "epoch": 0.21,
- "learning_rate": 9e-05,
- "loss": 4.2702,
- "step": 2500
- },
- {
- "epoch": 0.43,
- "learning_rate": 8e-05,
- "loss": 3.9326,
- "step": 5000
- },
- {
- "epoch": 0.43,
- "eval_loss": 3.8811988830566406,
- "eval_runtime": 485.7533,
- "eval_samples_per_second": 9.486,
- "eval_steps_per_second": 4.743,
- "step": 5000
- },
- {
- "epoch": 0.64,
- "learning_rate": 7e-05,
- "loss": 3.7946,
- "step": 7500
- },
- {
- "epoch": 0.85,
- "learning_rate": 6e-05,
- "loss": 3.7099,
- "step": 10000
- },
- {
- "epoch": 0.85,
- "eval_loss": 3.7074594497680664,
- "eval_runtime": 487.18,
- "eval_samples_per_second": 9.459,
- "eval_steps_per_second": 4.729,
- "step": 10000
- },
- {
- "epoch": 1.06,
- "learning_rate": 5e-05,
- "loss": 3.657,
- "step": 12500
- },
- {
- "epoch": 1.28,
- "learning_rate": 4e-05,
- "loss": 3.6085,
- "step": 15000
- },
- {
- "epoch": 1.28,
- "eval_loss": 3.6284306049346924,
- "eval_runtime": 484.984,
- "eval_samples_per_second": 9.501,
- "eval_steps_per_second": 4.751,
- "step": 15000
- }
- ],
- "max_steps": 25000,
- "num_train_epochs": 3,
- "total_flos": 2.2288967663616e+17,
- "trial_name": null,
- "trial_params": null
- }
checkpoint-15000/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d14f6f41cf21f30f5eb683a0b9414800094d15f898e0254a9c31393958aed209
- size 3375
checkpoint-20000/config.json DELETED
@@ -1,39 +0,0 @@
- {
- "_name_or_path": "bigscience/bloom-350m",
- "adapters": {
- "adapters": {},
- "config_map": {},
- "fusion_config_map": {},
- "fusions": {}
- },
- "apply_residual_connection_post_layernorm": false,
- "architectures": [
- "BloomForCausalLM"
- ],
- "attention_dropout": 0.0,
- "attention_softmax_in_fp32": true,
- "bias_dropout_fusion": true,
- "bos_token_id": 1,
- "eos_token_id": 2,
- "hidden_dropout": 0.0,
- "hidden_size": 1024,
- "initializer_range": 0.02,
- "layer_norm_epsilon": 1e-05,
- "masked_softmax_fusion": true,
- "model_type": "bloom",
- "n_head": 16,
- "n_inner": null,
- "n_layer": 24,
- "offset_alibi": 100,
- "pad_token_id": 3,
- "pretraining_tp": 1,
- "seq_length": 2048,
- "skip_bias_add": true,
- "skip_bias_add_qkv": false,
- "slow_but_exact": false,
- "torch_dtype": "float32",
- "transformers_version": "4.20.0.dev0",
- "unk_token_id": 0,
- "use_cache": true,
- "vocab_size": 250880
- }
checkpoint-20000/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:38839006d5b75b018803308ec39f56f6a164acda29a0f2fede066fe4236ef866
- size 2254269
checkpoint-20000/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ac58a7747254733f3a05bd54e93799e165c95d1762f7c9bb25d3f6f384b24ce6
- size 2236955191
checkpoint-20000/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c2ab9b52e364e49a6593a765f851264b1dd5e9b0207961295e70ed4788ef648c
- size 14503
checkpoint-20000/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:69341a1831197b0345e31eaac56abf9ad4527cc56eba4b526818b4ffb6ef6dad
- size 623
checkpoint-20000/special_tokens_map.json DELETED
@@ -1 +0,0 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
checkpoint-20000/tokenizer.json DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8f6efc66e73f1fd69da4f436e48befb519fdff3fe18910850c1d41bd862293a5
- size 14500443
checkpoint-20000/tokenizer_config.json DELETED
@@ -1 +0,0 @@
- {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "add_prefix_space": false, "name_or_path": "bigscience/bloom-350m", "special_tokens_map_file": null, "padding_side": "left", "tokenizer_class": "BloomTokenizer"}
checkpoint-20000/trainer_state.json DELETED
@@ -1,96 +0,0 @@
- {
- "best_metric": 3.5915658473968506,
- "best_model_checkpoint": "/users/zyong2/data/zyong2/bigscience/data/processed/024/bloom-350m_az_bitfit_100000samples_-1vocab_original-frozen/checkpoint-20000",
- "epoch": 1.7006802721088436,
- "global_step": 20000,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
- {
- "epoch": 0.21,
- "learning_rate": 9e-05,
- "loss": 4.2702,
- "step": 2500
- },
- {
- "epoch": 0.43,
- "learning_rate": 8e-05,
- "loss": 3.9326,
- "step": 5000
- },
- {
- "epoch": 0.43,
- "eval_loss": 3.8811988830566406,
- "eval_runtime": 485.7533,
- "eval_samples_per_second": 9.486,
- "eval_steps_per_second": 4.743,
- "step": 5000
- },
- {
- "epoch": 0.64,
- "learning_rate": 7e-05,
- "loss": 3.7946,
- "step": 7500
- },
- {
- "epoch": 0.85,
- "learning_rate": 6e-05,
- "loss": 3.7099,
- "step": 10000
- },
- {
- "epoch": 0.85,
- "eval_loss": 3.7074594497680664,
- "eval_runtime": 487.18,
- "eval_samples_per_second": 9.459,
- "eval_steps_per_second": 4.729,
- "step": 10000
- },
- {
- "epoch": 1.06,
- "learning_rate": 5e-05,
- "loss": 3.657,
- "step": 12500
- },
- {
- "epoch": 1.28,
- "learning_rate": 4e-05,
- "loss": 3.6085,
- "step": 15000
- },
- {
- "epoch": 1.28,
- "eval_loss": 3.6284306049346924,
- "eval_runtime": 484.984,
- "eval_samples_per_second": 9.501,
- "eval_steps_per_second": 4.751,
- "step": 15000
- },
- {
- "epoch": 1.49,
- "learning_rate": 3e-05,
- "loss": 3.5817,
- "step": 17500
- },
- {
- "epoch": 1.7,
- "learning_rate": 2e-05,
- "loss": 3.5696,
- "step": 20000
- },
- {
- "epoch": 1.7,
- "eval_loss": 3.5915658473968506,
- "eval_runtime": 485.8978,
- "eval_samples_per_second": 9.483,
- "eval_steps_per_second": 4.742,
- "step": 20000
- }
- ],
- "max_steps": 25000,
- "num_train_epochs": 3,
- "total_flos": 2.9718623551488e+17,
- "trial_name": null,
- "trial_params": null
- }
checkpoint-20000/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:d14f6f41cf21f30f5eb683a0b9414800094d15f898e0254a9c31393958aed209
- size 3375
checkpoint-2500/config.json DELETED
@@ -1,39 +0,0 @@
- {
- "_name_or_path": "bigscience/bloom-350m",
- "adapters": {
- "adapters": {},
- "config_map": {},
- "fusion_config_map": {},
- "fusions": {}
- },
- "apply_residual_connection_post_layernorm": false,
- "architectures": [
- "BloomForCausalLM"
- ],
- "attention_dropout": 0.0,
- "attention_softmax_in_fp32": true,
- "bias_dropout_fusion": true,
- "bos_token_id": 1,
- "eos_token_id": 2,
- "hidden_dropout": 0.0,
- "hidden_size": 1024,
- "initializer_range": 0.02,
- "layer_norm_epsilon": 1e-05,
- "masked_softmax_fusion": true,
- "model_type": "bloom",
- "n_head": 16,
- "n_inner": null,
- "n_layer": 24,
- "offset_alibi": 100,
- "pad_token_id": 3,
- "pretraining_tp": 1,
- "seq_length": 2048,
- "skip_bias_add": true,
- "skip_bias_add_qkv": false,
- "slow_but_exact": false,
- "torch_dtype": "float32",
- "transformers_version": "4.20.0.dev0",
- "unk_token_id": 0,
- "use_cache": true,
- "vocab_size": 250880
- }
checkpoint-2500/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:802b6e19dbabf54c1fc21cbda7ea52049caf2243dceb34c4b67b12148177fac0
- size 2254269
checkpoint-2500/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:86d1f7b456cd8b5855dbd53324a63b25a21cd79f0d13da5e27de78a46f098406
- size 2236955191
checkpoint-2500/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:03a3c1141f8bebf59967bea65fa021fcc2ad8a3d7753ae183d1d946d3f5b6d8e
- size 14503
checkpoint-2500/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8848be77d5e16f8ad560a7262091b3d3fcd8d0f3fa50682054480c93bc684fe6
- size 623