yongzx committed on
Commit 5960ca4
1 Parent(s): 3c07072

rm --cached *

.gitattributes DELETED
@@ -1,33 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- tokenizer.json filter=lfs diff=lfs merge=lfs -text
 
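The deleted .gitattributes listed the path patterns this repository stored through Git LFS rather than as regular git blobs (model weights, tokenizer.json, archives, and so on). Below is a minimal sketch of inspecting such rules programmatically; the sample lines are copied from the diff above, while the parse_lfs_patterns helper is purely illustrative and not part of the repository.

    import fnmatch

    # A few of the deleted .gitattributes rules, pasted for illustration.
    GITATTRIBUTES = """\
    *.bin filter=lfs diff=lfs merge=lfs -text
    *.pt filter=lfs diff=lfs merge=lfs -text
    tokenizer.json filter=lfs diff=lfs merge=lfs -text
    """

    def parse_lfs_patterns(text):
        """Return the path patterns whose git filter is set to lfs."""
        patterns = []
        for line in text.splitlines():
            parts = line.split()
            if parts and "filter=lfs" in parts[1:]:
                patterns.append(parts[0])
        return patterns

    patterns = parse_lfs_patterns(GITATTRIBUTES)
    print(patterns)  # ['*.bin', '*.pt', 'tokenizer.json']
    print(any(fnmatch.fnmatch("pytorch_model.bin", p) for p in patterns))  # True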
.gitignore DELETED
@@ -1,15 +0,0 @@
- checkpoint-*/
- */pilot_*/
- pilot_*/
- checkpoint-*/
- */pilot_*/
- pilot_*/
- checkpoint-*/
- */pilot_*/
- pilot_*/
- checkpoint-*/
- */pilot_*/
- pilot_*/
- checkpoint-*/
- */pilot_*/
- pilot_*/
 
all_results.json DELETED
@@ -1,14 +0,0 @@
- {
- "epoch": 19.86,
- "eval_loss": 2.2680606842041016,
- "eval_runtime": 511.7929,
- "eval_samples": 4906,
- "eval_samples_per_second": 9.586,
- "eval_steps_per_second": 4.793,
- "perplexity": 9.660647589269308,
- "train_loss": 2.30218890625,
- "train_runtime": 60621.0247,
- "train_samples": 10000,
- "train_samples_per_second": 3.299,
- "train_steps_per_second": 0.412
- }
 
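The perplexity recorded in all_results.json is simply exp(eval_loss), so the two fields can be cross-checked directly. A quick sanity check:

    import math

    eval_loss = 2.2680606842041016
    print(math.exp(eval_loss))  # ~9.6606, matching the reported "perplexity" field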
config.json DELETED
@@ -1,39 +0,0 @@
- {
- "_name_or_path": "bigscience/bloom-350m",
- "adapters": {
- "adapters": {},
- "config_map": {},
- "fusion_config_map": {},
- "fusions": {}
- },
- "apply_residual_connection_post_layernorm": false,
- "architectures": [
- "BloomForCausalLM"
- ],
- "attention_dropout": 0.0,
- "attention_softmax_in_fp32": true,
- "bias_dropout_fusion": true,
- "bos_token_id": 1,
- "eos_token_id": 2,
- "hidden_dropout": 0.0,
- "hidden_size": 1024,
- "initializer_range": 0.02,
- "layer_norm_epsilon": 1e-05,
- "masked_softmax_fusion": true,
- "model_type": "bloom",
- "n_head": 16,
- "n_inner": null,
- "n_layer": 24,
- "offset_alibi": 100,
- "pad_token_id": 3,
- "pretraining_tp": 1,
- "seq_length": 2048,
- "skip_bias_add": true,
- "skip_bias_add_qkv": false,
- "slow_but_exact": false,
- "torch_dtype": "float32",
- "transformers_version": "4.20.0.dev0",
- "unk_token_id": 0,
- "use_cache": true,
- "vocab_size": 250880
- }
 
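config.json describes the bigscience/bloom-350m architecture (24 layers, hidden size 1024, 16 heads, a 250,880-token vocabulary); the extra "adapters" block suggests the checkpoint was saved with an adapter-transformers-style fork rather than stock transformers, though the diff does not say so explicitly. A minimal sketch of reading the file with plain transformers, assuming the deleted config has been restored to the current directory:

    from transformers import AutoConfig

    # Assumes config.json has been restored locally; the path is illustrative.
    config = AutoConfig.from_pretrained(".")
    print(config.model_type, config.n_layer, config.hidden_size, config.vocab_size)
    # bloom 24 1024 250880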
eval_results.json DELETED
@@ -1,9 +0,0 @@
- {
- "epoch": 19.86,
- "eval_loss": 2.2680606842041016,
- "eval_runtime": 511.7929,
- "eval_samples": 4906,
- "eval_samples_per_second": 9.586,
- "eval_steps_per_second": 4.793,
- "perplexity": 9.660647589269308
- }
 
pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:3390219300ee6c457de0c79a3a1b36b0b31089bc503e9c7263ad881a98cfe9a3
- size 2236955191
 
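pytorch_model.bin (like training_args.bin and the *.pt files below) was stored as a Git LFS pointer: a three-line text stub recording the spec version, the SHA-256 of the real blob, and its size in bytes (here roughly 2.24 GB). A minimal sketch of checking that a downloaded blob matches its pointer; the file names and helper functions are hypothetical, not part of the repository:

    import hashlib
    from pathlib import Path

    def read_lfs_pointer(path):
        """Parse a git-lfs pointer file into a {version, oid, size} dict."""
        fields = dict(line.split(" ", 1) for line in Path(path).read_text().splitlines())
        return {
            "version": fields["version"],
            "oid": fields["oid"].removeprefix("sha256:"),
            "size": int(fields["size"]),
        }

    def blob_matches_pointer(blob_path, pointer):
        # Reads the whole blob into memory; fine for a sketch, stream for huge files.
        data = Path(blob_path).read_bytes()
        return (len(data) == pointer["size"]
                and hashlib.sha256(data).hexdigest() == pointer["oid"])

    # Hypothetical usage, with the pointer saved alongside the downloaded weights:
    # pointer = read_lfs_pointer("pytorch_model.bin.pointer")
    # print(blob_matches_pointer("pytorch_model.bin", pointer))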
special_tokens_map.json DELETED
@@ -1 +0,0 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
 
tokenizer.json DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8f6efc66e73f1fd69da4f436e48befb519fdff3fe18910850c1d41bd862293a5
- size 14500443
 
tokenizer_config.json DELETED
@@ -1 +0,0 @@
- {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "add_prefix_space": false, "name_or_path": "bigscience/bloom-350m", "special_tokens_map_file": null, "padding_side": "left", "tokenizer_class": "BloomTokenizer"}
 
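The three tokenizer files (tokenizer.json, tokenizer_config.json, special_tokens_map.json) are the standard BLOOM tokenizer saved with left-side padding, the usual setting for batched causal-LM generation. A minimal loading sketch, assuming the deleted files are restored into a local directory (the path below is illustrative):

    from transformers import AutoTokenizer

    # Assumes the deleted tokenizer files are restored under ./tokenizer
    tok = AutoTokenizer.from_pretrained("./tokenizer")
    print(tok.padding_side, tok.pad_token, tok.eos_token)  # left <pad> </s>
    print(tok("Salam dünya")["input_ids"])                 # token ids for an Azerbaijani greeting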
train_results.json DELETED
@@ -1,8 +0,0 @@
- {
- "epoch": 19.86,
- "train_loss": 2.30218890625,
- "train_runtime": 60621.0247,
- "train_samples": 10000,
- "train_samples_per_second": 3.299,
- "train_steps_per_second": 0.412
- }
 
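The throughput numbers in train_results.json are internally consistent: samples per second divided by steps per second gives the effective batch size, and total steps divided by steps per second roughly reproduces the reported runtime. A quick check (the effective batch size of about 8 is inferred from these ratios, not stated anywhere in the deleted files):

    train_samples_per_second = 3.299
    train_steps_per_second = 0.412
    train_runtime = 60621.0247   # seconds, ~16.8 hours
    total_steps = 25000          # max_steps from trainer_state.json

    print(train_samples_per_second / train_steps_per_second)  # ~8.0 -> effective batch size
    print(total_steps / train_steps_per_second)               # ~60680 s, close to train_runtime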
trainer_state.json DELETED
@@ -1,125 +0,0 @@
- {
- "best_metric": 2.2680606842041016,
- "best_model_checkpoint": "/users/zyong2/data/zyong2/bigscience/data/processed/024/bloom-350m_az_fish_10000samples_-1vocab_original-frozen/checkpoint-25000",
- "epoch": 19.856519150625125,
- "global_step": 25000,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
- {
- "epoch": 1.99,
- "learning_rate": 9e-05,
- "loss": 2.9608,
- "step": 2500
- },
- {
- "epoch": 3.97,
- "learning_rate": 8e-05,
- "loss": 2.5218,
- "step": 5000
- },
- {
- "epoch": 3.97,
- "eval_loss": 2.484114170074463,
- "eval_runtime": 513.9237,
- "eval_samples_per_second": 9.546,
- "eval_steps_per_second": 4.773,
- "step": 5000
- },
- {
- "epoch": 5.96,
- "learning_rate": 7e-05,
- "loss": 2.379,
- "step": 7500
- },
- {
- "epoch": 7.94,
- "learning_rate": 6e-05,
- "loss": 2.2927,
- "step": 10000
- },
- {
- "epoch": 7.94,
- "eval_loss": 2.3578550815582275,
- "eval_runtime": 514.0611,
- "eval_samples_per_second": 9.544,
- "eval_steps_per_second": 4.772,
- "step": 10000
- },
- {
- "epoch": 9.93,
- "learning_rate": 5e-05,
- "loss": 2.2308,
- "step": 12500
- },
- {
- "epoch": 11.91,
- "learning_rate": 4e-05,
- "loss": 2.1853,
- "step": 15000
- },
- {
- "epoch": 11.91,
- "eval_loss": 2.305732488632202,
- "eval_runtime": 512.0621,
- "eval_samples_per_second": 9.581,
- "eval_steps_per_second": 4.79,
- "step": 15000
- },
- {
- "epoch": 13.9,
- "learning_rate": 3e-05,
- "loss": 2.1504,
- "step": 17500
- },
- {
- "epoch": 15.89,
- "learning_rate": 2e-05,
- "loss": 2.1224,
- "step": 20000
- },
- {
- "epoch": 15.89,
- "eval_loss": 2.276737928390503,
- "eval_runtime": 511.8831,
- "eval_samples_per_second": 9.584,
- "eval_steps_per_second": 4.792,
- "step": 20000
- },
- {
- "epoch": 17.87,
- "learning_rate": 1e-05,
- "loss": 2.096,
- "step": 22500
- },
- {
- "epoch": 19.86,
- "learning_rate": 0.0,
- "loss": 2.0827,
- "step": 25000
- },
- {
- "epoch": 19.86,
- "eval_loss": 2.2680606842041016,
- "eval_runtime": 511.8308,
- "eval_samples_per_second": 9.585,
- "eval_steps_per_second": 4.793,
- "step": 25000
- },
- {
- "epoch": 19.86,
- "step": 25000,
- "total_flos": 3.7165924872093696e+17,
- "train_loss": 2.30218890625,
- "train_runtime": 60621.0247,
- "train_samples_per_second": 3.299,
- "train_steps_per_second": 0.412
- }
- ],
- "max_steps": 25000,
- "num_train_epochs": 20,
- "total_flos": 3.7165924872093696e+17,
- "trial_name": null,
- "trial_params": null
- }
 
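The learning rates logged in trainer_state.json (9e-05 at step 2,500 down to 0.0 at step 25,000, dropping by 1e-05 every 2,500 steps) are consistent with a linear decay from a peak of 1e-04 over the 25,000 training steps with no warmup; the peak value itself is inferred from the logs, not recorded in the deleted files. A small sketch reproducing that schedule under those assumptions:

    # Assumed schedule: linear decay from a 1e-4 peak over 25,000 steps, no warmup.
    PEAK_LR = 1e-4
    MAX_STEPS = 25_000

    def linear_lr(step):
        return PEAK_LR * (1 - step / MAX_STEPS)

    for step in range(2_500, 25_001, 2_500):
        print(step, round(linear_lr(step), 8))
    # 2500 9e-05, 5000 8e-05, ..., 25000 0.0 -- matching the logged values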
training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:ee1ddaaffb59b13be3134b1f95b5d2678f076190a29d2648a4405c1d042422dc
- size 3375
 
wikiann-az-results.txt DELETED
@@ -1,8 +0,0 @@
- ==================================================
- Results
- ==================================================
- Model: /users/zyong2/data/zyong2/bigscience/data/processed/024/bloom-350m_az_fish_10000samples_-1vocab_original-frozen/
- [0.398034398034398, 0.37980085348506404, 0.37650389242745935, 0.39167862266857967, 0.39313795568263044, 0.36518650088809945, 0.3641496118560339, 0.37324929971988796, 0.3589920607525025, 0.38878907653611217]
- 37.90
- 1.29
- ==================================================
 
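The two bare numbers at the end of wikiann-az-results.txt appear to be the mean and the population standard deviation of the ten scores above them, scaled by 100 (presumably WikiANN-az results over ten runs, though the file itself does not say). A quick check:

    import statistics

    scores = [0.398034398034398, 0.37980085348506404, 0.37650389242745935,
              0.39167862266857967, 0.39313795568263044, 0.36518650088809945,
              0.3641496118560339, 0.37324929971988796, 0.3589920607525025,
              0.38878907653611217]

    print(round(statistics.mean(scores) * 100, 2))    # 37.9  (reported as 37.90)
    print(round(statistics.pstdev(scores) * 100, 2))  # 1.29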
word_embeddings.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:174ed618237771e5906be0e8d70c568de63633f3bb5e8a1e303bbdbaeaedc1ca
- size 1027605867
 
word_embeddings_layernorm.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:10917f86841a4f322406bd72ba2e4ae8e4780aaf462c98a76eca01e0c5fbc893
- size 9703
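The sizes of the two embedding checkpoints line up with config.json: word_embeddings.pt is essentially a float32 matrix of vocab_size x hidden_size (250,880 x 1,024 x 4 bytes, about 1.03 GB) plus a small amount of serialization overhead, and word_embeddings_layernorm.pt holds just a LayerNorm weight and bias (2 x 1,024 float32 values) plus overhead. A quick back-of-the-envelope check:

    VOCAB_SIZE, HIDDEN_SIZE, FP32_BYTES = 250_880, 1_024, 4

    embedding_bytes = VOCAB_SIZE * HIDDEN_SIZE * FP32_BYTES
    layernorm_bytes = 2 * HIDDEN_SIZE * FP32_BYTES   # weight + bias

    print(embedding_bytes)   # 1_027_604_480 vs. the 1_027_605_867-byte LFS pointer
    print(layernorm_bytes)   # 8_192 vs. the 9_703-byte LFS pointer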